Merge V8 5.2.361.47 DO NOT MERGE

https://chromium.googlesource.com/v8/v8/+/5.2.361.47

FPIIM-449

Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/DEPS b/src/DEPS
index 1bb616e..00c7f1f 100644
--- a/src/DEPS
+++ b/src/DEPS
@@ -3,7 +3,7 @@
   "+src",
   "-src/compiler",
   "+src/compiler/pipeline.h",
-  "+src/compiler/code-stub-assembler.h",
+  "+src/compiler/code-assembler.h",
   "+src/compiler/wasm-compiler.h",
   "-src/heap",
   "+src/heap/heap.h",
diff --git a/src/accessors.cc b/src/accessors.cc
index 374c0a2..8b8753b 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -40,6 +40,11 @@
   Handle<Object> set = v8::FromCData(isolate, setter);
   info->set_getter(*get);
   info->set_setter(*set);
+  Address redirected = info->redirected_getter();
+  if (redirected != nullptr) {
+    Handle<Object> js_get = v8::FromCData(isolate, redirected);
+    info->set_js_getter(*js_get);
+  }
   return info;
 }
 
@@ -88,37 +93,6 @@
   Isolate* isolate = name->GetIsolate();
 
   switch (map->instance_type()) {
-    case JS_TYPED_ARRAY_TYPE: {
-      if (!CheckForName(name, isolate->factory()->length_string(),
-                        JSTypedArray::kLengthOffset, object_offset) &&
-          !CheckForName(name, isolate->factory()->byte_length_string(),
-                        JSTypedArray::kByteLengthOffset, object_offset) &&
-          !CheckForName(name, isolate->factory()->byte_offset_string(),
-                        JSTypedArray::kByteOffsetOffset, object_offset)) {
-        return false;
-      }
-
-      if (map->is_dictionary_map()) return false;
-
-      // Check if the property is overridden on the instance.
-      DescriptorArray* descriptors = map->instance_descriptors();
-      int descriptor = descriptors->SearchWithCache(isolate, *name, *map);
-      if (descriptor != DescriptorArray::kNotFound) return false;
-
-      Handle<Object> proto = Handle<Object>(map->prototype(), isolate);
-      if (!proto->IsJSReceiver()) return false;
-
-      // Check if the property is defined in the prototype chain.
-      LookupIterator it(proto, name);
-      if (!it.IsFound()) return false;
-
-      Object* original_proto =
-          JSFunction::cast(map->GetConstructor())->prototype();
-
-      // Property is not configurable. It is enough to verify that
-      // the holder is the same.
-      return *it.GetHolder<Object>() == original_proto;
-    }
     case JS_DATA_VIEW_TYPE:
       return CheckForName(name, isolate->factory()->byte_length_string(),
                           JSDataView::kByteLengthOffset, object_offset) ||
@@ -129,9 +103,11 @@
   }
 }
 
-MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
-    Isolate* isolate, Handle<JSObject> receiver, Handle<JSObject> holder,
-    Handle<Name> name, Handle<Object> value, bool observe) {
+namespace {
+
+MUST_USE_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
+    Isolate* isolate, Handle<Object> receiver, Handle<JSObject> holder,
+    Handle<Name> name, Handle<Object> value) {
   LookupIterator it(receiver, name, holder,
                     LookupIterator::OWN_SKIP_INTERCEPTOR);
   // Skip any access checks we might hit. This accessor should never hit in a
@@ -140,37 +116,26 @@
     CHECK(it.HasAccess());
     it.Next();
   }
+  DCHECK(holder.is_identical_to(it.GetHolder<JSObject>()));
   CHECK_EQ(LookupIterator::ACCESSOR, it.state());
-
-  Handle<Object> old_value;
-  bool is_observed = observe && receiver->map()->is_observed();
-  if (is_observed) {
-    MaybeHandle<Object> maybe_old = Object::GetPropertyWithAccessor(&it);
-    if (!maybe_old.ToHandle(&old_value)) return maybe_old;
-  }
-
   it.ReconfigureDataProperty(value, it.property_attributes());
-
-  if (is_observed && !old_value->SameValue(*value)) {
-    return JSObject::EnqueueChangeRecord(receiver, "update", name, old_value);
-  }
-
   return value;
 }
 
+}  // namespace
+
 void Accessors::ReconfigureToDataProperty(
     v8::Local<v8::Name> key, v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<JSObject> receiver =
-      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+  Handle<Object> receiver = Utils::OpenHandle(*info.This());
   Handle<JSObject> holder =
       Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
   Handle<Name> name = Utils::OpenHandle(*key);
   Handle<Object> value = Utils::OpenHandle(*val);
-  MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
-      isolate, receiver, holder, name, value, false);
+  MaybeHandle<Object> result =
+      ReplaceAccessorWithDataProperty(isolate, receiver, holder, name, value);
   if (result.is_null()) isolate->OptionalRescheduleException(false);
 }
 
@@ -221,7 +186,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
 
-  Handle<JSReceiver> object = Utils::OpenHandle(*info.This());
+  Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
   Handle<JSArray> array = Handle<JSArray>::cast(object);
   Handle<Object> length_obj = Utils::OpenHandle(*val);
 
@@ -231,9 +196,7 @@
     return;
   }
 
-  if (JSArray::ObservableSetLength(array, length).is_null()) {
-    isolate->OptionalRescheduleException(false);
-  }
+  JSArray::SetLength(array, length);
 
   if (info.ShouldThrowOnError()) {
     uint32_t actual_new_len = 0;
@@ -305,7 +268,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* res = Smi::FromInt(
       Script::cast(JSValue::cast(object)->value())->column_offset());
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -332,7 +295,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* id = Smi::FromInt(Script::cast(JSValue::cast(object)->value())->id());
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
 }
@@ -357,7 +320,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* source = Script::cast(JSValue::cast(object)->value())->name();
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
 }
@@ -381,7 +344,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* source = Script::cast(JSValue::cast(object)->value())->source();
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
 }
@@ -405,7 +368,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* res =
       Smi::FromInt(Script::cast(JSValue::cast(object)->value())->line_offset());
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -432,7 +395,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* res =
       Smi::FromInt(Script::cast(JSValue::cast(object)->value())->type());
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -458,7 +421,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* res = Smi::FromInt(
       Script::cast(JSValue::cast(object)->value())->compilation_type());
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -484,7 +447,7 @@
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = Utils::OpenHandle(*info.This());
+  Handle<Object> object = Utils::OpenHandle(*info.Holder());
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Script::InitLineEnds(script);
@@ -519,7 +482,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* url = Script::cast(JSValue::cast(object)->value())->source_url();
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
 }
@@ -543,7 +506,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* url =
       Script::cast(JSValue::cast(object)->value())->source_mapping_url();
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
@@ -567,7 +530,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
                                       ->origin_options()
                                       .IsEmbedderDebugScript();
@@ -596,7 +559,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.This());
+  Object* object = *Utils::OpenHandle(*info.Holder());
   Object* res = Script::cast(JSValue::cast(object)->value())->context_data();
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
 }
@@ -621,7 +584,7 @@
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = Utils::OpenHandle(*info.This());
+  Handle<Object> object = Utils::OpenHandle(*info.Holder());
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Handle<Object> result = isolate->factory()->undefined_value();
@@ -657,16 +620,12 @@
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = Utils::OpenHandle(*info.This());
+  Handle<Object> object = Utils::OpenHandle(*info.Holder());
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Handle<Object> result = isolate->factory()->undefined_value();
   if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
-    Handle<Code> code(SharedFunctionInfo::cast(
-        script->eval_from_shared())->code());
-    result = Handle<Object>(Smi::FromInt(code->SourcePosition(
-                                script->eval_from_instructions_offset())),
-                            isolate);
+    result = Handle<Object>(Smi::FromInt(script->GetEvalPosition()), isolate);
   }
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
@@ -691,17 +650,19 @@
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = Utils::OpenHandle(*info.This());
+  Handle<Object> object = Utils::OpenHandle(*info.Holder());
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
-  Handle<Object> result;
-  Handle<SharedFunctionInfo> shared(
-      SharedFunctionInfo::cast(script->eval_from_shared()));
-  // Find the name of the function calling eval.
-  if (!shared->name()->IsUndefined()) {
-    result = Handle<Object>(shared->name(), isolate);
-  } else {
-    result = Handle<Object>(shared->inferred_name(), isolate);
+  Handle<Object> result = isolate->factory()->undefined_value();
+  if (!script->eval_from_shared()->IsUndefined()) {
+    Handle<SharedFunctionInfo> shared(
+        SharedFunctionInfo::cast(script->eval_from_shared()));
+    // Find the name of the function calling eval.
+    if (!shared->name()->IsUndefined()) {
+      result = Handle<Object>(shared->name(), isolate);
+    } else {
+      result = Handle<Object>(shared->inferred_name(), isolate);
+    }
   }
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
@@ -732,24 +693,8 @@
 
 MUST_USE_RESULT static MaybeHandle<Object> SetFunctionPrototype(
     Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
-  Handle<Object> old_value;
-  bool is_observed = function->map()->is_observed();
-  if (is_observed) {
-    if (function->has_prototype())
-      old_value = handle(function->prototype(), isolate);
-    else
-      old_value = isolate->factory()->NewFunctionPrototype(function);
-  }
-
   JSFunction::SetPrototype(function, value);
   DCHECK(function->prototype() == *value);
-
-  if (is_observed && !old_value->SameValue(*value)) {
-    MaybeHandle<Object> result = JSObject::EnqueueChangeRecord(
-        function, "update", isolate->factory()->prototype_string(), old_value);
-    if (result.is_null()) return MaybeHandle<Object>();
-  }
-
   return function;
 }
 
@@ -811,45 +756,19 @@
   HandleScope scope(isolate);
   Handle<JSFunction> function =
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
-
-  int length = 0;
-  if (function->shared()->is_compiled()) {
-    length = function->shared()->length();
-  } else {
-    // If the function isn't compiled yet, the length is not computed
-    // correctly yet. Compile it now and return the right length.
-    if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
-      length = function->shared()->length();
-    }
-    if (isolate->has_pending_exception()) {
-      isolate->OptionalRescheduleException(false);
-    }
+  Handle<Object> result;
+  if (!JSFunction::GetLength(isolate, function).ToHandle(&result)) {
+    result = handle(Smi::FromInt(0), isolate);
+    isolate->OptionalRescheduleException(false);
   }
-  Handle<Object> result(Smi::FromInt(length), isolate);
+
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
-void Accessors::ObservedReconfigureToDataProperty(
-    v8::Local<v8::Name> key, v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  HandleScope scope(isolate);
-  Handle<JSObject> receiver =
-      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
-  Handle<JSObject> holder =
-      Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
-  Handle<Name> name = Utils::OpenHandle(*key);
-  Handle<Object> value = Utils::OpenHandle(*val);
-  MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
-      isolate, receiver, holder, name, value, true);
-  if (result.is_null()) isolate->OptionalRescheduleException(false);
-}
-
-
 Handle<AccessorInfo> Accessors::FunctionLengthInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   return MakeAccessor(isolate, isolate->factory()->length_string(),
-                      &FunctionLengthGetter, &ObservedReconfigureToDataProperty,
+                      &FunctionLengthGetter, &ReconfigureToDataProperty,
                       attributes);
 }
 
@@ -866,19 +785,14 @@
   HandleScope scope(isolate);
   Handle<JSFunction> function =
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
-  Handle<Object> result;
-  if (function->shared()->name_should_print_as_anonymous()) {
-    result = isolate->factory()->anonymous_string();
-  } else {
-    result = handle(function->shared()->name(), isolate);
-  }
+  Handle<Object> result = JSFunction::GetName(isolate, function);
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
 Handle<AccessorInfo> Accessors::FunctionNameInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   return MakeAccessor(isolate, isolate->factory()->name_string(),
-                      &FunctionNameGetter, &ObservedReconfigureToDataProperty,
+                      &FunctionNameGetter, &ReconfigureToDataProperty,
                       attributes);
 }
 
@@ -1155,6 +1069,65 @@
 
 
 //
+// Accessors::BoundFunctionLength
+//
+
+void Accessors::BoundFunctionLengthGetter(
+    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+  Handle<JSBoundFunction> function =
+      Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
+
+  Handle<Smi> target_length;
+  Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
+                            isolate);
+  if (!JSFunction::GetLength(isolate, target).ToHandle(&target_length)) {
+    target_length = handle(Smi::FromInt(0), isolate);
+    isolate->OptionalRescheduleException(false);
+    return;
+  }
+
+  int bound_length = function->bound_arguments()->length();
+  int length = Max(0, target_length->value() - bound_length);
+
+  Handle<Object> result(Smi::FromInt(length), isolate);
+  info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+Handle<AccessorInfo> Accessors::BoundFunctionLengthInfo(
+    Isolate* isolate, PropertyAttributes attributes) {
+  return MakeAccessor(isolate, isolate->factory()->length_string(),
+                      &BoundFunctionLengthGetter, &ReconfigureToDataProperty,
+                      attributes);
+}
+
+//
+// Accessors::BoundFunctionName
+//
+
+void Accessors::BoundFunctionNameGetter(
+    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+  Handle<JSBoundFunction> function =
+      Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
+  Handle<Object> result;
+  if (!JSBoundFunction::GetName(isolate, function).ToHandle(&result)) {
+    isolate->OptionalRescheduleException(false);
+    return;
+  }
+  info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+Handle<AccessorInfo> Accessors::BoundFunctionNameInfo(
+    Isolate* isolate, PropertyAttributes attributes) {
+  return MakeAccessor(isolate, isolate->factory()->name_string(),
+                      &BoundFunctionNameGetter, &ReconfigureToDataProperty,
+                      attributes);
+}
+
+//
 // Accessors::MakeModuleExport
 //
 
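
The new BoundFunctionLengthGetter above implements the ES2015 rule that a bound function reports max(0, target.length - number of bound arguments). A self-contained sketch of just that arithmetic, with plain ints standing in for the Smi handles:

    #include <algorithm>

    // length of target.bind(thisArg, arg1, ..., argN); clamped at zero so
    // binding more arguments than the target declares never goes negative.
    int BoundFunctionLength(int target_length, int bound_argument_count) {
      return std::max(0, target_length - bound_argument_count);
    }
    // A 3-parameter target bound with 1 argument reports length 2;
    // bound with 5 arguments it reports length 0.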
diff --git a/src/accessors.h b/src/accessors.h
index 3fe550c..6a99934 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -22,6 +22,8 @@
 #define ACCESSOR_INFO_LIST(V)     \
   V(ArgumentsIterator)            \
   V(ArrayLength)                  \
+  V(BoundFunctionLength)          \
+  V(BoundFunctionName)            \
   V(FunctionArguments)            \
   V(FunctionCaller)               \
   V(FunctionName)                 \
@@ -46,7 +48,6 @@
 
 #define ACCESSOR_SETTER_LIST(V)        \
   V(ReconfigureToDataProperty)         \
-  V(ObservedReconfigureToDataProperty) \
   V(ArrayLengthSetter)                 \
   V(FunctionPrototypeSetter)
 
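
ACCESSOR_INFO_LIST and ACCESSOR_SETTER_LIST are X-macro lists: each V(Name) entry is expanded by whatever macro the consumer supplies for V, so adding BoundFunctionLength and BoundFunctionName here is all it takes to generate their boilerplate everywhere the list is expanded. A minimal, self-contained sketch of the pattern (the enum consumer is illustrative; the real consumers generate the static ...Info(Isolate*, PropertyAttributes) declarations seen in accessors.cc):

    #define ACCESSOR_INFO_LIST(V) \
      V(ArrayLength)              \
      V(BoundFunctionLength)      \
      V(BoundFunctionName)

    #define AS_ENUM(Name) k##Name,
    enum AccessorId { ACCESSOR_INFO_LIST(AS_ENUM) kAccessorCount };
    // AccessorId now contains kBoundFunctionLength and kBoundFunctionName
    // without any other file having to list the new names.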
diff --git a/src/address-map.h b/src/address-map.h
index df32f89..017fc5d 100644
--- a/src/address-map.h
+++ b/src/address-map.h
@@ -40,7 +40,6 @@
   }
 };
 
-
 class RootIndexMap : public AddressMapBase {
  public:
   explicit RootIndexMap(Isolate* isolate);
@@ -59,123 +58,155 @@
   DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
 };
 
-
-class BackReference {
+class SerializerReference {
  public:
-  explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
+  SerializerReference() : bitfield_(Special(kInvalidValue)) {}
 
-  BackReference() : bitfield_(kInvalidValue) {}
-
-  static BackReference SourceReference() { return BackReference(kSourceValue); }
-
-  static BackReference GlobalProxyReference() {
-    return BackReference(kGlobalProxyValue);
+  static SerializerReference FromBitfield(uint32_t bitfield) {
+    return SerializerReference(bitfield);
   }
 
-  static BackReference LargeObjectReference(uint32_t index) {
-    return BackReference(SpaceBits::encode(LO_SPACE) |
-                         ChunkOffsetBits::encode(index));
-  }
-
-  static BackReference DummyReference() { return BackReference(kDummyValue); }
-
-  static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
-                                 uint32_t chunk_offset) {
+  static SerializerReference BackReference(AllocationSpace space,
+                                           uint32_t chunk_index,
+                                           uint32_t chunk_offset) {
     DCHECK(IsAligned(chunk_offset, kObjectAlignment));
     DCHECK_NE(LO_SPACE, space);
-    return BackReference(
+    return SerializerReference(
         SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
         ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
   }
 
-  bool is_valid() const { return bitfield_ != kInvalidValue; }
-  bool is_source() const { return bitfield_ == kSourceValue; }
-  bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
+  static SerializerReference LargeObjectReference(uint32_t index) {
+    return SerializerReference(SpaceBits::encode(LO_SPACE) |
+                               ValueIndexBits::encode(index));
+  }
+
+  static SerializerReference AttachedReference(uint32_t index) {
+    return SerializerReference(SpaceBits::encode(kAttachedReferenceSpace) |
+                               ValueIndexBits::encode(index));
+  }
+
+  static SerializerReference DummyReference() {
+    return SerializerReference(Special(kDummyValue));
+  }
+
+  bool is_valid() const { return bitfield_ != Special(kInvalidValue); }
+
+  bool is_back_reference() const {
+    return SpaceBits::decode(bitfield_) <= LAST_SPACE;
+  }
 
   AllocationSpace space() const {
-    DCHECK(is_valid());
-    return SpaceBits::decode(bitfield_);
+    DCHECK(is_back_reference());
+    return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
   }
 
   uint32_t chunk_offset() const {
-    DCHECK(is_valid());
+    DCHECK(is_back_reference());
     return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
   }
 
   uint32_t large_object_index() const {
-    DCHECK(is_valid());
+    DCHECK(is_back_reference());
     DCHECK(chunk_index() == 0);
     return ChunkOffsetBits::decode(bitfield_);
   }
 
   uint32_t chunk_index() const {
-    DCHECK(is_valid());
+    DCHECK(is_back_reference());
     return ChunkIndexBits::decode(bitfield_);
   }
 
-  uint32_t reference() const {
-    DCHECK(is_valid());
+  uint32_t back_reference() const {
+    DCHECK(is_back_reference());
     return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
   }
 
-  uint32_t bitfield() const { return bitfield_; }
+  bool is_attached_reference() const {
+    return SpaceBits::decode(bitfield_) == kAttachedReferenceSpace;
+  }
+
+  int attached_reference_index() const {
+    DCHECK(is_attached_reference());
+    return ValueIndexBits::decode(bitfield_);
+  }
 
  private:
-  static const uint32_t kInvalidValue = 0xFFFFFFFF;
-  static const uint32_t kSourceValue = 0xFFFFFFFE;
-  static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
-  static const uint32_t kDummyValue = 0xFFFFFFFC;
+  explicit SerializerReference(uint32_t bitfield) : bitfield_(bitfield) {}
+
+  inline static uint32_t Special(int value) {
+    return SpaceBits::encode(kSpecialValueSpace) |
+           ValueIndexBits::encode(value);
+  }
+
+  // We use the 32-bit bitfield to encode either a back reference, a special
+  // value, or an attached reference index.
+  // Back reference:
+  //   [ Space index             ] [ Chunk index ] [ Chunk offset ]
+  //   [ LO_SPACE                ] [ large object index           ]
+  // Special value
+  //   [ kSpecialValueSpace      ] [ Special value index          ]
+  // Attached reference
+  //   [ kAttachedReferenceSpace ] [ Attached reference index     ]
+
   static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
   static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
+  static const int kValueIndexSize = kChunkOffsetSize + kChunkIndexSize;
 
- public:
-  static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
+  static const int kSpecialValueSpace = LAST_SPACE + 1;
+  static const int kAttachedReferenceSpace = kSpecialValueSpace + 1;
+  STATIC_ASSERT(kAttachedReferenceSpace < (1 << kSpaceTagSize));
 
- private:
+  static const int kInvalidValue = 0;
+  static const int kDummyValue = 1;
+
+  // The chunk offset can also be used to encode the index of special values.
   class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
   class ChunkIndexBits
       : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
-  class SpaceBits
-      : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
-  };
+  class ValueIndexBits : public BitField<uint32_t, 0, kValueIndexSize> {};
+  STATIC_ASSERT(ChunkIndexBits::kNext == ValueIndexBits::kNext);
+  class SpaceBits : public BitField<int, kValueIndexSize, kSpaceTagSize> {};
+  STATIC_ASSERT(SpaceBits::kNext == 32);
 
   uint32_t bitfield_;
-};
 
+  friend class SerializerReferenceMap;
+};
 
 // Mapping objects to their location after deserialization.
 // This is used during building, but not at runtime by V8.
-class BackReferenceMap : public AddressMapBase {
+class SerializerReferenceMap : public AddressMapBase {
  public:
-  BackReferenceMap()
-      : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
+  SerializerReferenceMap()
+      : no_allocation_(),
+        map_(HashMap::PointersMatch),
+        attached_reference_index_(0) {}
 
-  ~BackReferenceMap() { delete map_; }
-
-  BackReference Lookup(HeapObject* obj) {
-    HashMap::Entry* entry = LookupEntry(map_, obj, false);
-    return entry ? BackReference(GetValue(entry)) : BackReference();
+  SerializerReference Lookup(HeapObject* obj) {
+    HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+    return entry ? SerializerReference(GetValue(entry)) : SerializerReference();
   }
 
-  void Add(HeapObject* obj, BackReference b) {
+  void Add(HeapObject* obj, SerializerReference b) {
     DCHECK(b.is_valid());
-    DCHECK_NULL(LookupEntry(map_, obj, false));
-    HashMap::Entry* entry = LookupEntry(map_, obj, true);
-    SetValue(entry, b.bitfield());
+    DCHECK_NULL(LookupEntry(&map_, obj, false));
+    HashMap::Entry* entry = LookupEntry(&map_, obj, true);
+    SetValue(entry, b.bitfield_);
   }
 
-  void AddSourceString(String* string) {
-    Add(string, BackReference::SourceReference());
-  }
-
-  void AddGlobalProxy(HeapObject* global_proxy) {
-    Add(global_proxy, BackReference::GlobalProxyReference());
+  SerializerReference AddAttachedReference(HeapObject* attached_reference) {
+    SerializerReference reference =
+        SerializerReference::AttachedReference(attached_reference_index_++);
+    Add(attached_reference, reference);
+    return reference;
   }
 
  private:
   DisallowHeapAllocation no_allocation_;
-  HashMap* map_;
-  DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
+  HashMap map_;
+  int attached_reference_index_;
+  DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
 };
 
 }  // namespace internal
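
The comment block above describes how SerializerReference packs three kinds of references into one 32-bit word: the top kSpaceTagSize bits hold either a real AllocationSpace or one of two pseudo-spaces (special values, attached references), and the low bits hold either chunk index plus chunk offset or a plain value index. A standalone model of the pack/unpack arithmetic (bit widths are assumptions for the sketch; the real code uses V8's BitField template):

    #include <cassert>
    #include <cstdint>

    constexpr int kSpaceTagSize = 3;                     // assumed width
    constexpr int kValueIndexSize = 32 - kSpaceTagSize;  // low 29 bits
    constexpr uint32_t kValueMask = (1u << kValueIndexSize) - 1;

    uint32_t Encode(uint32_t space_tag, uint32_t value_index) {
      assert(space_tag < (1u << kSpaceTagSize) && value_index <= kValueMask);
      return (space_tag << kValueIndexSize) | value_index;
    }
    uint32_t DecodeSpace(uint32_t bits) { return bits >> kValueIndexSize; }
    uint32_t DecodeIndex(uint32_t bits) { return bits & kValueMask; }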
diff --git a/src/api-arguments.cc b/src/api-arguments.cc
index c4b698c..71a0f60 100644
--- a/src/api-arguments.cc
+++ b/src/api-arguments.cc
@@ -9,10 +9,10 @@
 
 Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
   Isolate* isolate = this->isolate();
+  RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
-  FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_,
-                                       is_construct_call_);
+  FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
   f(info);
   return GetReturnValue<Object>(isolate);
 }
@@ -20,6 +20,7 @@
 Handle<JSObject> PropertyCallbackArguments::Call(
     IndexedPropertyEnumeratorCallback f) {
   Isolate* isolate = this->isolate();
+  RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
   PropertyCallbackInfo<v8::Array> info(begin());
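
RuntimeCallTimerScope is an RAII guard: constructing it starts attributing time to the given RuntimeCallStats counter, and its destructor stops the clock when Call returns on any path, so one line instruments the whole callback. A generic model of the pattern (the counter type is a hypothetical stand-in):

    #include <chrono>

    struct CallCounter {  // stand-in for a RuntimeCallStats entry
      long long nanos = 0;
      int count = 0;
    };

    class ScopedCallTimer {
     public:
      explicit ScopedCallTimer(CallCounter* counter)
          : counter_(counter), start_(std::chrono::steady_clock::now()) {}
      ~ScopedCallTimer() {  // runs on every exit path from the scope
        ++counter_->count;
        counter_->nanos +=
            std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now() - start_)
                .count();
      }

     private:
      CallCounter* counter_;
      std::chrono::steady_clock::time_point start_;
    };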
diff --git a/src/api-arguments.h b/src/api-arguments.h
index 3bfe34d..57a2d98 100644
--- a/src/api-arguments.h
+++ b/src/api-arguments.h
@@ -116,6 +116,7 @@
 #define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn)         \
   Handle<InternalReturn> Call(Function f, Handle<Name> name) {               \
     Isolate* isolate = this->isolate();                                      \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);       \
     VMState<EXTERNAL> state(isolate);                                        \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));             \
     PropertyCallbackInfo<ApiReturn> info(begin());                           \
@@ -138,6 +139,7 @@
 #define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn)  \
   Handle<InternalReturn> Call(Function f, uint32_t index) {            \
     Isolate* isolate = this->isolate();                                \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
     VMState<EXTERNAL> state(isolate);                                  \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));       \
     PropertyCallbackInfo<ApiReturn> info(begin());                     \
@@ -155,6 +157,8 @@
   Handle<Object> Call(GenericNamedPropertySetterCallback f, Handle<Name> name,
                       Handle<Object> value) {
     Isolate* isolate = this->isolate();
+    RuntimeCallTimerScope timer(
+        isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
     VMState<EXTERNAL> state(isolate);
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
     PropertyCallbackInfo<v8::Value> info(begin());
@@ -167,6 +171,8 @@
   Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
                       Handle<Object> value) {
     Isolate* isolate = this->isolate();
+    RuntimeCallTimerScope timer(
+        isolate, &RuntimeCallStats::IndexedPropertySetterCallback);
     VMState<EXTERNAL> state(isolate);
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
     PropertyCallbackInfo<v8::Value> info(begin());
@@ -179,6 +185,8 @@
   void Call(AccessorNameSetterCallback f, Handle<Name> name,
             Handle<Object> value) {
     Isolate* isolate = this->isolate();
+    RuntimeCallTimerScope timer(isolate,
+                                &RuntimeCallStats::AccessorNameSetterCallback);
     VMState<EXTERNAL> state(isolate);
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
     PropertyCallbackInfo<void> info(begin());
@@ -206,19 +214,19 @@
   static const int kIsolateIndex = T::kIsolateIndex;
   static const int kCalleeIndex = T::kCalleeIndex;
   static const int kContextSaveIndex = T::kContextSaveIndex;
+  static const int kNewTargetIndex = T::kNewTargetIndex;
 
   FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
                             internal::HeapObject* callee,
-                            internal::Object* holder, internal::Object** argv,
-                            int argc, bool is_construct_call)
-      : Super(isolate),
-        argv_(argv),
-        argc_(argc),
-        is_construct_call_(is_construct_call) {
+                            internal::Object* holder,
+                            internal::HeapObject* new_target,
+                            internal::Object** argv, int argc)
+      : Super(isolate), argv_(argv), argc_(argc) {
     Object** values = begin();
     values[T::kDataIndex] = data;
     values[T::kCalleeIndex] = callee;
     values[T::kHolderIndex] = holder;
+    values[T::kNewTargetIndex] = new_target;
     values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
     values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
     // Here the hole is set as default value.
@@ -245,7 +253,6 @@
  private:
   internal::Object** argv_;
   int argc_;
-  bool is_construct_call_;
 };
 
 }  // namespace internal
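
Replacing the is_construct_call_ flag with a kNewTargetIndex slot means the implicit-argument array now carries new.target itself rather than a boolean. On the embedder side this is what lets a callback distinguish [[Call]] from [[Construct]] and see which constructor was actually used; a sketch, assuming this version's public FunctionCallbackInfo::NewTarget() accessor:

    #include "include/v8.h"

    void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      if (info.NewTarget()->IsUndefined()) {
        // plain call: f(...)
      } else {
        // construct call: new f(...); NewTarget() is the constructor
        // (or subclass constructor) that was invoked.
      }
    }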
diff --git a/src/api-natives.cc b/src/api-natives.cc
index adf4b6a..fcd19cc 100644
--- a/src/api-natives.cc
+++ b/src/api-natives.cc
@@ -17,6 +17,7 @@
 
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
                                         Handle<ObjectTemplateInfo> data,
+                                        Handle<JSReceiver> new_target,
                                         bool is_hidden_prototype);
 
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
@@ -31,7 +32,7 @@
                                Handle<FunctionTemplateInfo>::cast(data), name);
   } else if (data->IsObjectTemplateInfo()) {
     return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
-                             false);
+                             Handle<JSReceiver>(), false);
   } else {
     return data;
   }
@@ -288,11 +289,25 @@
 
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
                                         Handle<ObjectTemplateInfo> info,
+                                        Handle<JSReceiver> new_target,
                                         bool is_hidden_prototype) {
-  // Fast path.
-  Handle<JSObject> result;
+  Handle<JSFunction> constructor;
   uint32_t serial_number =
       static_cast<uint32_t>(Smi::cast(info->serial_number())->value());
+  if (!new_target.is_null()) {
+    if (new_target->IsJSFunction() &&
+        JSFunction::cast(*new_target)->shared()->function_data() ==
+            info->constructor() &&
+        JSFunction::cast(*new_target)->context()->native_context() ==
+            isolate->context()->native_context()) {
+      constructor = Handle<JSFunction>::cast(new_target);
+    } else {
+      // Disable caching for subclass instantiation.
+      serial_number = 0;
+    }
+  }
+  // Fast path.
+  Handle<JSObject> result;
   if (serial_number) {
     // Probe cache.
     auto cache = isolate->template_instantiations_cache();
@@ -305,20 +320,27 @@
   }
   // Enter a new scope.  Recursion could otherwise create a lot of handles.
   HandleScope scope(isolate);
-  auto constructor = handle(info->constructor(), isolate);
-  Handle<JSFunction> cons;
-  if (constructor->IsUndefined()) {
-    cons = isolate->object_function();
-  } else {
-    auto cons_templ = Handle<FunctionTemplateInfo>::cast(constructor);
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
+
+  if (constructor.is_null()) {
+    Handle<Object> cons(info->constructor(), isolate);
+    if (cons->IsUndefined()) {
+      constructor = isolate->object_function();
+    } else {
+      auto cons_templ = Handle<FunctionTemplateInfo>::cast(cons);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, constructor,
+                                 InstantiateFunction(isolate, cons_templ),
+                                 JSObject);
+    }
+
+    if (new_target.is_null()) new_target = constructor;
   }
-  auto object = isolate->factory()->NewJSObject(cons);
+
+  Handle<JSObject> object;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
+                             JSObject::New(constructor, new_target), JSObject);
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, result,
-      ConfigureInstance(isolate, object, info, is_hidden_prototype),
-      JSFunction);
+      ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject);
   // TODO(dcarney): is this necessary?
   JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
 
@@ -356,7 +378,7 @@
           isolate, prototype,
           InstantiateObject(isolate,
                             Handle<ObjectTemplateInfo>::cast(prototype_templ),
-                            data->hidden_prototype()),
+                            Handle<JSReceiver>(), data->hidden_prototype()),
           JSFunction);
     }
     auto parent = handle(data->parent_template(), isolate);
@@ -448,12 +470,11 @@
   return ::v8::internal::InstantiateFunction(isolate, data);
 }
 
-
 MaybeHandle<JSObject> ApiNatives::InstantiateObject(
-    Handle<ObjectTemplateInfo> data) {
+    Handle<ObjectTemplateInfo> data, Handle<JSReceiver> new_target) {
   Isolate* isolate = data->GetIsolate();
   InvokeScope invoke_scope(isolate);
-  return ::v8::internal::InstantiateObject(isolate, data, false);
+  return ::v8::internal::InstantiateObject(isolate, data, new_target, false);
 }
 
 
@@ -545,7 +566,7 @@
         if (!obj->needs_access_check() &&
             obj->named_property_handler()->IsUndefined() &&
             obj->indexed_property_handler()->IsUndefined()) {
-          type = JS_OBJECT_TYPE;
+          type = JS_API_OBJECT_TYPE;
         } else {
           type = JS_SPECIAL_API_OBJECT_TYPE;
         }
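
InstantiateObject keys the per-isolate template_instantiations_cache by the template's serial number, and zeroing the number is the opt-out used above for subclass instantiation, where a cached instance would carry the wrong map and prototype. A self-contained model of that policy (the map stands in for the real cache):

    #include <cstdint>
    #include <memory>
    #include <unordered_map>

    struct Instance {};  // stand-in for an instantiated object

    std::unordered_map<uint32_t, std::shared_ptr<Instance>> cache;

    std::shared_ptr<Instance> Instantiate(uint32_t serial_number) {
      if (serial_number != 0) {  // 0 means "never cache"
        auto it = cache.find(serial_number);
        if (it != cache.end()) return it->second;  // fast path: reuse
      }
      auto fresh = std::make_shared<Instance>();   // slow path: build
      if (serial_number != 0) cache.emplace(serial_number, fresh);
      return fresh;
    }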
diff --git a/src/api-natives.h b/src/api-natives.h
index 91f0b16..66901fe 100644
--- a/src/api-natives.h
+++ b/src/api-natives.h
@@ -23,7 +23,8 @@
       Handle<FunctionTemplateInfo> data);
 
   MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
-      Handle<ObjectTemplateInfo> data);
+      Handle<ObjectTemplateInfo> data,
+      Handle<JSReceiver> new_target = Handle<JSReceiver>());
 
   enum ApiInstanceType {
     JavaScriptObjectType,
diff --git a/src/api.cc b/src/api.cc
index 853bd50..f757d1d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -48,13 +48,13 @@
 #include "src/profiler/heap-profiler.h"
 #include "src/profiler/heap-snapshot-generator-inl.h"
 #include "src/profiler/profile-generator-inl.h"
-#include "src/profiler/sampler.h"
-#include "src/property.h"
+#include "src/profiler/tick-sample.h"
 #include "src/property-descriptor.h"
 #include "src/property-details.h"
+#include "src/property.h"
 #include "src/prototype.h"
-#include "src/runtime/runtime.h"
 #include "src/runtime-profiler.h"
+#include "src/runtime/runtime.h"
 #include "src/simulator.h"
 #include "src/snapshot/natives.h"
 #include "src/snapshot/snapshot.h"
@@ -66,57 +66,56 @@
 #include "src/version.h"
 #include "src/vm-state-inl.h"
 
-
 namespace v8 {
 
-#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
-
+#define LOG_API(isolate, class_name, function_name)                       \
+  i::RuntimeCallTimerScope _runtime_timer(                                \
+      isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
+  LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
 
 #define ENTER_V8(isolate) i::VMState<v8::OTHER> __state__((isolate))
 
-
-#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, function_name, \
-                                      bailout_value, HandleScopeClass, \
-                                      do_callback)                     \
-  if (IsExecutionTerminatingCheck(isolate)) {                          \
-    return bailout_value;                                              \
-  }                                                                    \
-  HandleScopeClass handle_scope(isolate);                              \
-  CallDepthScope call_depth_scope(isolate, context, do_callback);      \
-  LOG_API(isolate, function_name);                                     \
-  ENTER_V8(isolate);                                                   \
+#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name,  \
+                                      function_name, bailout_value,  \
+                                      HandleScopeClass, do_callback) \
+  if (IsExecutionTerminatingCheck(isolate)) {                        \
+    return bailout_value;                                            \
+  }                                                                  \
+  HandleScopeClass handle_scope(isolate);                            \
+  CallDepthScope call_depth_scope(isolate, context, do_callback);    \
+  LOG_API(isolate, class_name, function_name);                       \
+  ENTER_V8(isolate);                                                 \
   bool has_pending_exception = false
 
-
-#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(                                  \
-    context, function_name, bailout_value, HandleScopeClass, do_callback)    \
-  auto isolate = context.IsEmpty()                                           \
-                     ? i::Isolate::Current()                                 \
-                     : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
-  PREPARE_FOR_EXECUTION_GENERIC(isolate, context, function_name,             \
+#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+                                           bailout_value, HandleScopeClass,    \
+                                           do_callback)                        \
+  auto isolate = context.IsEmpty()                                             \
+                     ? i::Isolate::Current()                                   \
+                     : reinterpret_cast<i::Isolate*>(context->GetIsolate());   \
+  PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name,   \
                                 bailout_value, HandleScopeClass, do_callback);
 
+#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, class_name, function_name, \
+                                           T)                                  \
+  PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), class_name,         \
+                                function_name, MaybeLocal<T>(),                \
+                                InternalEscapableScope, false);
 
-#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, function_name, T)     \
-  PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), function_name, \
-                                MaybeLocal<T>(), InternalEscapableScope,  \
-                                false);
+#define PREPARE_FOR_EXECUTION(context, class_name, function_name, T)          \
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name,      \
+                                     MaybeLocal<T>(), InternalEscapableScope, \
+                                     false)
 
+#define PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, class_name,              \
+                                            function_name, T)                 \
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name,      \
+                                     MaybeLocal<T>(), InternalEscapableScope, \
+                                     true)
 
-#define PREPARE_FOR_EXECUTION(context, function_name, T)                      \
-  PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, MaybeLocal<T>(), \
-                                     InternalEscapableScope, false)
-
-
-#define PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, function_name, T)        \
-  PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, MaybeLocal<T>(), \
-                                     InternalEscapableScope, true)
-
-
-#define PREPARE_FOR_EXECUTION_PRIMITIVE(context, function_name, T)         \
-  PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, Nothing<T>(), \
-                                     i::HandleScope, false)
-
+#define PREPARE_FOR_EXECUTION_PRIMITIVE(context, class_name, function_name, T) \
+  PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name,       \
+                                     Nothing<T>(), i::HandleScope, false)
 
 #define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
   do {                                                 \
@@ -241,7 +240,7 @@
 
 // When V8 cannot allocate memory, FatalProcessOutOfMemory is called.
 // The default fatal error handler is called and execution is stopped.
-void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
+void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
   i::Isolate* isolate = i::Isolate::Current();
   char last_few_messages[Heap::kTraceRingBufferSize + 1];
   char js_stacktrace[Heap::kStacktraceBufferSize + 1];
@@ -303,7 +302,9 @@
     PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
     PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
   }
-  Utils::ApiCheck(false, location, "Allocation failed - process out of memory");
+  Utils::ApiCheck(false, location, is_heap_oom
+                  ? "Allocation failed - JavaScript heap out of memory"
+                  : "Allocation failed - process out of memory");
   // If the fatal error handler returns, we stop execution.
   FATAL("API fatal error handler returned after process out of memory");
 }
@@ -656,7 +657,7 @@
 
 
 i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
-  LOG_API(isolate, "Persistent::New");
+  LOG_API(isolate, Persistent, New);
   i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
 #ifdef VERIFY_HEAP
   if (i::FLAG_verify_heap) {
@@ -682,13 +683,7 @@
   isolate->heap()->RegisterExternallyReferencedObject(object);
 }
 
-void V8::MakeWeak(i::Object** object, void* parameter,
-                  WeakCallback weak_callback) {
-  i::GlobalHandles::MakeWeak(object, parameter, weak_callback);
-}
-
-
-void V8::MakeWeak(i::Object** object, void* parameter,
+void V8::MakeWeak(i::Object** location, void* parameter,
                   int internal_field_index1, int internal_field_index2,
                   WeakCallbackInfo<void>::Callback weak_callback) {
   WeakCallbackType type = WeakCallbackType::kParameter;
@@ -703,24 +698,25 @@
     DCHECK_EQ(internal_field_index1, -1);
     DCHECK_EQ(internal_field_index2, -1);
   }
-  i::GlobalHandles::MakeWeak(object, parameter, weak_callback, type);
+  i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
 }
 
-
-void V8::MakeWeak(i::Object** object, void* parameter,
+void V8::MakeWeak(i::Object** location, void* parameter,
                   WeakCallbackInfo<void>::Callback weak_callback,
                   WeakCallbackType type) {
-  i::GlobalHandles::MakeWeak(object, parameter, weak_callback, type);
+  i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
 }
 
-
-void* V8::ClearWeak(i::Object** obj) {
-  return i::GlobalHandles::ClearWeakness(obj);
+void V8::MakeWeak(i::Object*** location_addr) {
+  i::GlobalHandles::MakeWeak(location_addr);
 }
 
+void* V8::ClearWeak(i::Object** location) {
+  return i::GlobalHandles::ClearWeakness(location);
+}
 
-void V8::DisposeGlobal(i::Object** obj) {
-  i::GlobalHandles::Destroy(obj);
+void V8::DisposeGlobal(i::Object** location) {
+  i::GlobalHandles::Destroy(location);
 }
 
 
@@ -1028,13 +1024,13 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   auto value_obj = Utils::OpenHandle(*value);
+  CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
   if (value_obj->IsObjectTemplateInfo()) {
     templ->set_serial_number(i::Smi::FromInt(0));
     if (templ->IsFunctionTemplateInfo()) {
       i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
     }
   }
-  // TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
   i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
                                  value_obj,
                                  static_cast<i::PropertyAttributes>(attribute));
@@ -1136,19 +1132,19 @@
 }
 
 
-Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
-                                              FunctionCallback callback,
-                                              v8::Local<Value> data,
-                                              v8::Local<Signature> signature,
-                                              int length) {
+Local<FunctionTemplate> FunctionTemplate::New(
+    Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
+    v8::Local<Signature> signature, int length, ConstructorBehavior behavior) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   // Changes to the environment cannot be captured in the snapshot. Expect no
   // function templates when the isolate is created for serialization.
   DCHECK(!i_isolate->serializer_enabled());
-  LOG_API(i_isolate, "FunctionTemplate::New");
+  LOG_API(i_isolate, FunctionTemplate, New);
   ENTER_V8(i_isolate);
-  return FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
-                             length, false);
+  auto templ = FunctionTemplateNew(i_isolate, callback, nullptr, data,
+                                   signature, length, false);
+  if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
+  return templ;
 }
 
 
@@ -1158,7 +1154,7 @@
     v8::Local<Signature> signature, int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   DCHECK(!i_isolate->serializer_enabled());
-  LOG_API(i_isolate, "FunctionTemplate::NewWithFastHandler");
+  LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
   ENTER_V8(i_isolate);
   return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
                              length, false);
@@ -1223,8 +1219,10 @@
   return obj;
 }
 
+namespace {
+
 template <typename Getter, typename Setter>
-static i::Handle<i::AccessorInfo> MakeAccessorInfo(
+i::Handle<i::AccessorInfo> MakeAccessorInfo(
     v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
     v8::AccessControl settings, v8::PropertyAttribute attributes,
     v8::Local<AccessorSignature> signature, bool is_special_data_property) {
@@ -1235,6 +1233,8 @@
     setter = reinterpret_cast<Setter>(&i::Accessors::ReconfigureToDataProperty);
   }
   SET_FIELD_WRAPPED(obj, set_setter, setter);
+  i::Address redirected = obj->redirected_getter();
+  if (redirected != nullptr) SET_FIELD_WRAPPED(obj, set_js_getter, redirected);
   if (data.IsEmpty()) {
     data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
   }
@@ -1243,6 +1243,7 @@
   return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
 }
 
+}  // namespace
 
 Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
   i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this, true);
@@ -1337,7 +1338,7 @@
   // Changes to the environment cannot be captured in the snapshot. Expect no
   // object templates when the isolate is created for serialization.
   DCHECK(!isolate->serializer_enabled());
-  LOG_API(isolate, "ObjectTemplate::New");
+  LOG_API(isolate, ObjectTemplate, New);
   ENTER_V8(isolate);
   i::Handle<i::Struct> struct_obj =
       isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
@@ -1715,7 +1716,7 @@
   i::Handle<i::HeapObject> obj =
       i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
   i::Isolate* isolate = obj->GetIsolate();
-  LOG_API(isolate, "v8::UnboundScript::GetId");
+  LOG_API(isolate, UnboundScript, GetId);
   i::HandleScope scope(isolate);
   i::Handle<i::SharedFunctionInfo> function_info(
       i::SharedFunctionInfo::cast(*obj));
@@ -1728,7 +1729,7 @@
   i::Handle<i::SharedFunctionInfo> obj =
       i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
   i::Isolate* isolate = obj->GetIsolate();
-  LOG_API(isolate, "UnboundScript::GetLineNumber");
+  LOG_API(isolate, UnboundScript, GetLineNumber);
   if (obj->script()->IsScript()) {
     i::Handle<i::Script> script(i::Script::cast(obj->script()));
     return i::Script::GetLineNumber(script, code_pos);
@@ -1742,7 +1743,7 @@
   i::Handle<i::SharedFunctionInfo> obj =
       i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
   i::Isolate* isolate = obj->GetIsolate();
-  LOG_API(isolate, "UnboundScript::GetName");
+  LOG_API(isolate, UnboundScript, GetName);
   if (obj->script()->IsScript()) {
     i::Object* name = i::Script::cast(obj->script())->name();
     return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
@@ -1756,7 +1757,7 @@
   i::Handle<i::SharedFunctionInfo> obj =
       i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
   i::Isolate* isolate = obj->GetIsolate();
-  LOG_API(isolate, "UnboundScript::GetSourceURL");
+  LOG_API(isolate, UnboundScript, GetSourceURL);
   if (obj->script()->IsScript()) {
     i::Object* url = i::Script::cast(obj->script())->source_url();
     return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
@@ -1770,7 +1771,7 @@
   i::Handle<i::SharedFunctionInfo> obj =
       i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
   i::Isolate* isolate = obj->GetIsolate();
-  LOG_API(isolate, "UnboundScript::GetSourceMappingURL");
+  LOG_API(isolate, UnboundScript, GetSourceMappingURL);
   if (obj->script()->IsScript()) {
     i::Object* url = i::Script::cast(obj->script())->source_mapping_url();
     return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
@@ -1781,12 +1782,13 @@
 
 
 MaybeLocal<Value> Script::Run(Local<Context> context) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Script::Run()", Value)
+  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Script, Run, Value)
+  i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
   i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   TRACE_EVENT0("v8", "V8.Execute");
   auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
-  i::Handle<i::Object> receiver(isolate->global_proxy(), isolate);
+  i::Handle<i::Object> receiver = isolate->global_proxy();
   Local<Value> result;
   has_pending_exception =
       !ToLocal<Value>(i::Execution::Call(isolate, fun, receiver, 0, NULL),
@@ -1817,8 +1819,8 @@
     Isolate* v8_isolate, Source* source, CompileOptions options,
     bool is_module) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  PREPARE_FOR_EXECUTION_WITH_ISOLATE(
-      isolate, "v8::ScriptCompiler::CompileUnbound()", UnboundScript);
+  PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, ScriptCompiler, CompileUnbound,
+                                     UnboundScript);
 
   // Don't try to produce any kind of cache when the debugger is loaded.
   if (isolate->debug()->is_loaded() &&
@@ -1977,8 +1979,8 @@
     Local<Context> v8_context, Source* source, size_t arguments_count,
     Local<String> arguments[], size_t context_extension_count,
     Local<Object> context_extensions[]) {
-  PREPARE_FOR_EXECUTION(
-      v8_context, "v8::ScriptCompiler::CompileFunctionInContext()", Function);
+  PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext,
+                        Function);
   i::Handle<i::String> source_string;
   auto factory = isolate->factory();
   if (arguments_count) {
@@ -2032,6 +2034,8 @@
   }
 
   i::Handle<i::Object> name_obj;
+  int eval_scope_position = 0;
+  int eval_position = i::RelocInfo::kNoPosition;
   int line_offset = 0;
   int column_offset = 0;
   if (!source->resource_name.IsEmpty()) {
@@ -2044,11 +2048,13 @@
     column_offset = static_cast<int>(source->resource_column_offset->Value());
   }
   i::Handle<i::JSFunction> fun;
-  has_pending_exception = !i::Compiler::GetFunctionFromEval(
-                               source_string, outer_info, context, i::SLOPPY,
-                               i::ONLY_SINGLE_FUNCTION_LITERAL, line_offset,
-                               column_offset - scope_position, name_obj,
-                               source->resource_options).ToHandle(&fun);
+  has_pending_exception =
+      !i::Compiler::GetFunctionFromEval(
+           source_string, outer_info, context, i::SLOPPY,
+           i::ONLY_SINGLE_FUNCTION_LITERAL, eval_scope_position, eval_position,
+           line_offset, column_offset - scope_position, name_obj,
+           source->resource_options)
+           .ToHandle(&fun);
   if (has_pending_exception) {
     isolate->ReportPendingMessages();
   }
@@ -2088,7 +2094,7 @@
                                            StreamedSource* v8_source,
                                            Local<String> full_source_string,
                                            const ScriptOrigin& origin) {
-  PREPARE_FOR_EXECUTION(context, "v8::ScriptCompiler::Compile()", Script);
+  PREPARE_FOR_EXECUTION(context, ScriptCompiler, Compile, Script);
   i::StreamedSource* source = v8_source->impl();
   i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
   i::Handle<i::Script> script = isolate->factory()->NewScript(str);
@@ -2287,7 +2293,7 @@
   if (!HasCaught()) return v8::Local<Value>();
   i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
   if (!raw_obj->IsJSObject()) return v8::Local<Value>();
-  PREPARE_FOR_EXECUTION(context, "v8::TryCatch::StackTrace", Value);
+  PREPARE_FOR_EXECUTION(context, TryCatch, StackTrace, Value);
   i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
   i::Handle<i::String> name = isolate->factory()->stack_string();
   Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
@@ -2389,7 +2395,7 @@
 
 
 Maybe<int> Message::GetLineNumber(Local<Context> context) const {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetLineNumber()", int);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Message, GetLineNumber, int);
   i::Handle<i::JSFunction> fun = isolate->message_get_line_number();
   i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
   i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
@@ -2421,8 +2427,7 @@
 
 
 Maybe<int> Message::GetStartColumn(Local<Context> context) const {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetStartColumn()",
-                                  int);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Message, GetStartColumn, int);
   i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
   i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
   i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
@@ -2444,7 +2449,7 @@
 
 Maybe<int> Message::GetEndColumn(Local<Context> context) const {
   auto self = Utils::OpenHandle(this);
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Message, GetEndColumn, int);
   i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
   i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
   i::Handle<i::Object> args[] = {self};
@@ -2488,7 +2493,7 @@
 
 
 MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
-  PREPARE_FOR_EXECUTION(context, "v8::Message::GetSourceLine()", String);
+  PREPARE_FOR_EXECUTION(context, Message, GetSourceLine, String);
   i::Handle<i::JSFunction> fun = isolate->message_get_source_line();
   i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
   i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
@@ -2733,7 +2738,7 @@
 
 MaybeLocal<Value> JSON::Parse(Isolate* v8_isolate, Local<String> json_string) {
   auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, "JSON::Parse", Value);
+  PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, JSON, Parse, Value);
   i::Handle<i::String> string = Utils::OpenHandle(*json_string);
   i::Handle<i::String> source = i::String::Flatten(string);
   auto maybe = source->IsSeqOneByteString()
@@ -2745,13 +2750,38 @@
   RETURN_ESCAPED(result);
 }
 
-
-Local<Value> JSON::Parse(Local<String> json_string) {
-  auto isolate = reinterpret_cast<v8::Isolate*>(
-      Utils::OpenHandle(*json_string)->GetIsolate());
-  RETURN_TO_LOCAL_UNCHECKED(Parse(isolate, json_string), Value);
+MaybeLocal<Value> JSON::Parse(Local<Context> context,
+                              Local<String> json_string) {
+  PREPARE_FOR_EXECUTION(context, JSON, Parse, Value);
+  i::Handle<i::String> string = Utils::OpenHandle(*json_string);
+  i::Handle<i::String> source = i::String::Flatten(string);
+  auto maybe = source->IsSeqOneByteString()
+                   ? i::JsonParser<true>::Parse(source)
+                   : i::JsonParser<false>::Parse(source);
+  Local<Value> result;
+  has_pending_exception = !ToLocal<Value>(maybe, &result);
+  RETURN_ON_FAILED_EXECUTION(Value);
+  RETURN_ESCAPED(result);
 }
 
+Local<Value> JSON::Parse(Local<String> json_string) {
+  RETURN_TO_LOCAL_UNCHECKED(Parse(Local<Context>(), json_string), Value);
+}
+
+MaybeLocal<String> JSON::Stringify(Local<Context> context,
+                                   Local<Object> json_object) {
+  PREPARE_FOR_EXECUTION(context, JSON, Stringify, String);
+  i::Handle<i::Object> object = Utils::OpenHandle(*json_object);
+  i::Handle<i::Object> maybe;
+  has_pending_exception =
+      !i::Runtime::BasicJsonStringify(isolate, object).ToHandle(&maybe);
+  RETURN_ON_FAILED_EXECUTION(String);
+  Local<String> result;
+  has_pending_exception =
+      !ToLocal<String>(i::Object::ToString(isolate, maybe), &result);
+  RETURN_ON_FAILED_EXECUTION(String);
+  RETURN_ESCAPED(result);
+}
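// A minimal embedder-side sketch of the Maybe-based JSON API above; it
// assumes a live v8::Isolate* isolate and v8::Local<v8::Context> context:
//
//   v8::Local<v8::String> json =
//       v8::String::NewFromUtf8(isolate, "{\"a\":1}",
//                               v8::NewStringType::kNormal)
//           .ToLocalChecked();
//   v8::Local<v8::Value> parsed;
//   if (v8::JSON::Parse(context, json).ToLocal(&parsed)) {
//     v8::Local<v8::String> round_trip =
//         v8::JSON::Stringify(context, parsed.As<v8::Object>())
//             .ToLocalChecked();
//   }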
 
 // --- D a t a ---
 
@@ -2969,7 +2999,7 @@
 MaybeLocal<String> Value::ToString(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
   if (obj->IsString()) return ToApiHandle<String>(obj);
-  PREPARE_FOR_EXECUTION(context, "ToString", String);
+  PREPARE_FOR_EXECUTION(context, Object, ToString, String);
   Local<String> result;
   has_pending_exception =
       !ToLocal<String>(i::Object::ToString(isolate, obj), &result);
@@ -2986,7 +3016,7 @@
 MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsString()) return ToApiHandle<String>(obj);
-  PREPARE_FOR_EXECUTION(context, "ToDetailString", String);
+  PREPARE_FOR_EXECUTION(context, Object, ToDetailString, String);
   Local<String> result;
   i::Handle<i::Object> args[] = {obj};
   has_pending_exception = !ToLocal<String>(
@@ -3008,7 +3038,7 @@
 MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
   if (obj->IsJSReceiver()) return ToApiHandle<Object>(obj);
-  PREPARE_FOR_EXECUTION(context, "ToObject", Object);
+  PREPARE_FOR_EXECUTION(context, Object, ToObject, Object);
   Local<Object> result;
   has_pending_exception =
       !ToLocal<Object>(i::Object::ToObject(isolate, obj), &result);
@@ -3039,7 +3069,7 @@
 MaybeLocal<Number> Value::ToNumber(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
   if (obj->IsNumber()) return ToApiHandle<Number>(obj);
-  PREPARE_FOR_EXECUTION(context, "ToNumber", Number);
+  PREPARE_FOR_EXECUTION(context, Object, ToNumber, Number);
   Local<Number> result;
   has_pending_exception = !ToLocal<Number>(i::Object::ToNumber(obj), &result);
   RETURN_ON_FAILED_EXECUTION(Number);
@@ -3055,7 +3085,7 @@
 MaybeLocal<Integer> Value::ToInteger(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) return ToApiHandle<Integer>(obj);
-  PREPARE_FOR_EXECUTION(context, "ToInteger", Integer);
+  PREPARE_FOR_EXECUTION(context, Object, ToInteger, Integer);
   Local<Integer> result;
   has_pending_exception =
       !ToLocal<Integer>(i::Object::ToInteger(isolate, obj), &result);
@@ -3073,7 +3103,7 @@
   auto obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) return ToApiHandle<Int32>(obj);
   Local<Int32> result;
-  PREPARE_FOR_EXECUTION(context, "ToInt32", Int32);
+  PREPARE_FOR_EXECUTION(context, Object, ToInt32, Int32);
   has_pending_exception =
       !ToLocal<Int32>(i::Object::ToInt32(isolate, obj), &result);
   RETURN_ON_FAILED_EXECUTION(Int32);
@@ -3090,7 +3120,7 @@
   auto obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) return ToApiHandle<Uint32>(obj);
   Local<Uint32> result;
-  PREPARE_FOR_EXECUTION(context, "ToUint32", Uint32);
+  PREPARE_FOR_EXECUTION(context, Object, ToUint32, Uint32);
   has_pending_exception =
       !ToLocal<Uint32>(i::Object::ToUint32(isolate, obj), &result);
   RETURN_ON_FAILED_EXECUTION(Uint32);
@@ -3105,62 +3135,55 @@
 
 void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
-  Utils::ApiCheck(isolate != NULL &&
-                  !isolate->IsDead(),
-                  "v8::internal::Internals::CheckInitialized()",
+  Utils::ApiCheck(isolate != NULL && !isolate->IsDead(),
+                  "v8::internal::Internals::CheckInitialized",
                   "Isolate is not initialized or V8 has died");
 }
 
 
 void External::CheckCast(v8::Value* that) {
-  Utils::ApiCheck(Utils::OpenHandle(that)->IsExternal(),
-                  "v8::External::Cast()",
+  Utils::ApiCheck(Utils::OpenHandle(that)->IsExternal(), "v8::External::Cast",
                   "Could not convert to external");
 }
 
 
 void v8::Object::CheckCast(Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsJSReceiver(), "v8::Object::Cast()",
+  Utils::ApiCheck(obj->IsJSReceiver(), "v8::Object::Cast",
                   "Could not convert to object");
 }
 
 
 void v8::Function::CheckCast(Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsCallable(), "v8::Function::Cast()",
+  Utils::ApiCheck(obj->IsCallable(), "v8::Function::Cast",
                   "Could not convert to function");
 }
 
 
 void v8::Boolean::CheckCast(v8::Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsBoolean(),
-                  "v8::Boolean::Cast()",
+  Utils::ApiCheck(obj->IsBoolean(), "v8::Boolean::Cast",
                   "Could not convert to boolean");
 }
 
 
 void v8::Name::CheckCast(v8::Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsName(),
-                  "v8::Name::Cast()",
-                  "Could not convert to name");
+  Utils::ApiCheck(obj->IsName(), "v8::Name::Cast", "Could not convert to name");
 }
 
 
 void v8::String::CheckCast(v8::Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsString(),
-                  "v8::String::Cast()",
+  Utils::ApiCheck(obj->IsString(), "v8::String::Cast",
                   "Could not convert to string");
 }
 
 
 void v8::Symbol::CheckCast(v8::Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsSymbol(),
-                  "v8::Symbol::Cast()",
+  Utils::ApiCheck(obj->IsSymbol(), "v8::Symbol::Cast",
                   "Could not convert to symbol");
 }
 
@@ -3175,62 +3198,56 @@
 
 void v8::Integer::CheckCast(v8::Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsNumber(),
-                  "v8::Integer::Cast()",
+  Utils::ApiCheck(obj->IsNumber(), "v8::Integer::Cast",
                   "Could not convert to number");
 }
 
 
 void v8::Int32::CheckCast(v8::Value* that) {
-  Utils::ApiCheck(that->IsInt32(), "v8::Int32::Cast()",
+  Utils::ApiCheck(that->IsInt32(), "v8::Int32::Cast",
                   "Could not convert to 32-bit signed integer");
 }
 
 
 void v8::Uint32::CheckCast(v8::Value* that) {
-  Utils::ApiCheck(that->IsUint32(), "v8::Uint32::Cast()",
+  Utils::ApiCheck(that->IsUint32(), "v8::Uint32::Cast",
                   "Could not convert to 32-bit unsigned integer");
 }
 
 
 void v8::Array::CheckCast(Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsJSArray(),
-                  "v8::Array::Cast()",
+  Utils::ApiCheck(obj->IsJSArray(), "v8::Array::Cast",
                   "Could not convert to array");
 }
 
 
 void v8::Map::CheckCast(Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsJSMap(), "v8::Map::Cast()",
-                  "Could not convert to Map");
+  Utils::ApiCheck(obj->IsJSMap(), "v8::Map::Cast", "Could not convert to Map");
 }
 
 
 void v8::Set::CheckCast(Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  Utils::ApiCheck(obj->IsJSSet(), "v8::Set::Cast()",
-                  "Could not convert to Set");
+  Utils::ApiCheck(obj->IsJSSet(), "v8_Set_Cast", "Could not convert to Set");
 }
 
 
 void v8::Promise::CheckCast(Value* that) {
-  Utils::ApiCheck(that->IsPromise(),
-                  "v8::Promise::Cast()",
+  Utils::ApiCheck(that->IsPromise(), "v8::Promise::Cast",
                   "Could not convert to promise");
 }
 
 
 void v8::Promise::Resolver::CheckCast(Value* that) {
-  Utils::ApiCheck(that->IsPromise(),
-                  "v8::Promise::Resolver::Cast()",
+  Utils::ApiCheck(that->IsPromise(), "v8::Promise::Resolver::Cast",
                   "Could not convert to promise resolver");
 }
 
 
 void v8::Proxy::CheckCast(Value* that) {
-  Utils::ApiCheck(that->IsProxy(), "v8::Proxy::Cast()",
+  Utils::ApiCheck(that->IsProxy(), "v8::Proxy::Cast",
                   "Could not convert to proxy");
 }
 
@@ -3367,7 +3384,7 @@
 Maybe<double> Value::NumberValue(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
   if (obj->IsNumber()) return Just(obj->Number());
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "NumberValue", double);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, NumberValue, double);
   i::Handle<i::Object> num;
   has_pending_exception = !i::Object::ToNumber(obj).ToHandle(&num);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double);
@@ -3388,7 +3405,7 @@
   if (obj->IsNumber()) {
     return Just(NumberToInt64(*obj));
   }
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, IntegerValue, int64_t);
   i::Handle<i::Object> num;
   has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
@@ -3412,7 +3429,7 @@
 Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
   if (obj->IsNumber()) return Just(NumberToInt32(*obj));
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Int32Value", int32_t);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Int32Value, int32_t);
   i::Handle<i::Object> num;
   has_pending_exception = !i::Object::ToInt32(isolate, obj).ToHandle(&num);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int32_t);
@@ -3431,7 +3448,7 @@
 Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
   if (obj->IsNumber()) return Just(NumberToUint32(*obj));
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Uint32Value", uint32_t);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Uint32Value, uint32_t);
   i::Handle<i::Object> num;
   has_pending_exception = !i::Object::ToUint32(isolate, obj).ToHandle(&num);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(uint32_t);
@@ -3453,7 +3470,7 @@
     if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self);
     return Local<Uint32>();
   }
-  PREPARE_FOR_EXECUTION(context, "ToArrayIndex", Uint32);
+  PREPARE_FOR_EXECUTION(context, Object, ToArrayIndex, Uint32);
   i::Handle<i::Object> string_obj;
   has_pending_exception =
       !i::Object::ToString(isolate, self).ToHandle(&string_obj);
@@ -3519,10 +3536,16 @@
   return self->SameValue(*other);
 }
 
+Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+  ENTER_V8(isolate);
+  LOG_API(isolate, Value, TypeOf);
+  return Utils::ToLocal(i::Object::TypeOf(isolate, Utils::OpenHandle(this)));
+}
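// A minimal sketch of the new Value::TypeOf; it assumes a live v8::Isolate*
// isolate and a v8::Local<v8::Value> value. The result mirrors the JS
// typeof operator ("number", "string", "function", ...):
//
//   v8::Local<v8::String> type = value->TypeOf(isolate);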
 
 Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
                             v8::Local<Value> key, v8::Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   auto value_obj = Utils::OpenHandle(*value);
@@ -3542,7 +3565,7 @@
 
 Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
                             v8::Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool);
   auto self = Utils::OpenHandle(this);
   auto value_obj = Utils::OpenHandle(*value);
   has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj,
@@ -3561,8 +3584,7 @@
 Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
                                            v8::Local<Name> key,
                                            v8::Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
-                                  bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool);
   i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
   i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -3580,8 +3602,7 @@
 Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
                                            uint32_t index,
                                            v8::Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
-                                  bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool);
   i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
 
@@ -3598,8 +3619,7 @@
                                           v8::Local<Name> key,
                                           v8::Local<Value> value,
                                           v8::PropertyAttribute attributes) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DefineOwnProperty()",
-                                  bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineOwnProperty, bool);
   i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
   i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -3642,7 +3662,7 @@
 Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
                                  v8::Local<Value> key, v8::Local<Value> value,
                                  v8::PropertyAttribute attribs) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::ForceSet()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, ForceSet, bool);
   auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
   auto key_obj = Utils::OpenHandle(*key);
   auto value_obj = Utils::OpenHandle(*value);
@@ -3658,9 +3678,8 @@
 bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
                           v8::PropertyAttribute attribs) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(),
-                                "v8::Object::ForceSet", false, i::HandleScope,
-                                false);
+  PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), Object, ForceSet,
+                                false, i::HandleScope, false);
   i::Handle<i::JSObject> self =
       i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
@@ -3676,7 +3695,7 @@
 
 Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
                                    Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetPrivate()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrivate, bool);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(reinterpret_cast<Name*>(*key));
   auto value_obj = Utils::OpenHandle(*value);
@@ -3702,7 +3721,7 @@
 
 MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
                                   Local<Value> key) {
-  PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
+  PREPARE_FOR_EXECUTION(context, Object, Get, Value);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> result;
@@ -3720,7 +3739,7 @@
 
 
 MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
-  PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
+  PREPARE_FOR_EXECUTION(context, Object, Get, Value);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   has_pending_exception =
@@ -3744,8 +3763,8 @@
 
 Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
     Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(
-      context, "v8::Object::GetPropertyAttributes()", PropertyAttribute);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, GetPropertyAttributes,
+                                  PropertyAttribute);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   if (!key_obj->IsName()) {
@@ -3773,8 +3792,7 @@
 
 MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
                                                        Local<String> key) {
-  PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyDescriptor()",
-                        Value);
+  PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyDescriptor, Value);
   i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_name = Utils::OpenHandle(*key);
 
@@ -3806,7 +3824,7 @@
 
 Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
                                      Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetPrototype()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrototype, bool);
   auto self = Utils::OpenHandle(this);
   auto value_obj = Utils::OpenHandle(*value);
   // We do not allow exceptions thrown while setting the prototype
@@ -3844,7 +3862,7 @@
 
 
 MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
-  PREPARE_FOR_EXECUTION(context, "v8::Object::GetPropertyNames()", Array);
+  PREPARE_FOR_EXECUTION(context, Object, GetPropertyNames, Array);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::FixedArray> value;
   has_pending_exception =
@@ -3864,13 +3882,24 @@
   RETURN_TO_LOCAL_UNCHECKED(GetPropertyNames(context), Array);
 }
 
-
 MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
-  PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyNames()", Array);
+  return GetOwnPropertyNames(
+      context, static_cast<v8::PropertyFilter>(ONLY_ENUMERABLE | SKIP_SYMBOLS));
+}
+
+Local<Array> v8::Object::GetOwnPropertyNames() {
+  auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+  RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
+}
+
+MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context,
+                                                  PropertyFilter filter) {
+  PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyNames, Array);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::FixedArray> value;
   has_pending_exception =
-      !i::JSReceiver::GetKeys(self, i::OWN_ONLY, i::ENUMERABLE_STRINGS)
+      !i::JSReceiver::GetKeys(self, i::OWN_ONLY,
+                              static_cast<i::PropertyFilter>(filter))
            .ToHandle(&value);
   RETURN_ON_FAILED_EXECUTION(Array);
   DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
@@ -3880,15 +3909,8 @@
   RETURN_ESCAPED(Utils::ToLocal(result));
 }
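// A minimal sketch of the filtered overload above; it assumes a live
// Local<Context> context and Local<Object> obj, and collects only the
// enumerable, string-keyed own properties:
//
//   v8::Local<v8::Array> names;
//   if (obj->GetOwnPropertyNames(
//              context, static_cast<v8::PropertyFilter>(
//                           v8::ONLY_ENUMERABLE | v8::SKIP_SYMBOLS))
//           .ToLocal(&names)) {
//     // names now holds the filtered keys.
//   }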
 
-
-Local<Array> v8::Object::GetOwnPropertyNames() {
-  auto context = ContextFromHeapObject(Utils::OpenHandle(this));
-  RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
-}
-
-
 MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
-  PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString", String);
+  PREPARE_FOR_EXECUTION(context, Object, ObjectProtoToString, String);
   auto obj = Utils::OpenHandle(this);
   Local<String> result;
   has_pending_exception =
@@ -3912,8 +3934,7 @@
 
 Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
                                           IntegrityLevel level) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetIntegrityLevel()",
-                                  bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetIntegrityLevel, bool);
   auto self = Utils::OpenHandle(this);
   i::JSReceiver::IntegrityLevel i_level =
       level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
@@ -3925,7 +3946,7 @@
 }
 
 Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Delete, bool);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   Maybe<bool> result =
@@ -3949,7 +3970,7 @@
 
 
 Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Has, bool);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   Maybe<bool> maybe = Nothing<bool>();
@@ -3982,8 +4003,7 @@
 
 
 Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DeleteProperty()",
-                                  bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DeleteProperty, bool);
   auto self = Utils::OpenHandle(this);
   Maybe<bool> result = i::JSReceiver::DeleteElement(self, index);
   has_pending_exception = result.IsNothing();
@@ -3999,7 +4019,7 @@
 
 
 Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Has, bool);
   auto self = Utils::OpenHandle(this);
   auto maybe = i::JSReceiver::HasElement(self, index);
   has_pending_exception = maybe.IsNothing();
@@ -4020,7 +4040,7 @@
                                      Setter setter, Data data,
                                      AccessControl settings,
                                      PropertyAttribute attributes) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetAccessor()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetAccessor, bool);
   if (!Utils::OpenHandle(self)->IsJSObject()) return Just(false);
   i::Handle<i::JSObject> obj =
       i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
@@ -4092,8 +4112,7 @@
 
 Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
                                        Local<Name> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasOwnProperty()",
-                                  bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool);
   auto self = Utils::OpenHandle(this);
   auto key_val = Utils::OpenHandle(*key);
   auto result = i::JSReceiver::HasOwnProperty(self, key_val);
@@ -4102,6 +4121,14 @@
   return result;
 }
 
+Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context, uint32_t index) {
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool);
+  auto self = Utils::OpenHandle(this);
+  auto result = i::JSReceiver::HasOwnProperty(self, index);
+  has_pending_exception = result.IsNothing();
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return result;
+}
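// A one-line sketch of the new indexed overload; it assumes a live
// Local<Context> context and Local<Object> obj:
//
//   bool has_element_zero = obj->HasOwnProperty(context, 0).FromMaybe(false);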
 
 bool v8::Object::HasOwnProperty(Local<String> key) {
   auto context = ContextFromHeapObject(Utils::OpenHandle(this));
@@ -4111,8 +4138,7 @@
 
 Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
                                              Local<Name> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasRealNamedProperty()",
-                                  bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedProperty, bool);
   auto self = Utils::OpenHandle(this);
   if (!self->IsJSObject()) return Just(false);
   auto key_val = Utils::OpenHandle(*key);
@@ -4132,8 +4158,8 @@
 
 Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
                                                uint32_t index) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context,
-                                  "v8::Object::HasRealIndexedProperty()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealIndexedProperty,
+                                  bool);
   auto self = Utils::OpenHandle(this);
   if (!self->IsJSObject()) return Just(false);
   auto result = i::JSObject::HasRealElementProperty(
@@ -4152,8 +4178,8 @@
 
 Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
                                                      Local<Name> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(
-      context, "v8::Object::HasRealNamedCallbackProperty()", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedCallbackProperty,
+                                  bool);
   auto self = Utils::OpenHandle(this);
   if (!self->IsJSObject()) return Just(false);
   auto key_val = Utils::OpenHandle(*key);
@@ -4187,8 +4213,8 @@
 
 MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
     Local<Context> context, Local<Name> key) {
-  PREPARE_FOR_EXECUTION(
-      context, "v8::Object::GetRealNamedPropertyInPrototypeChain()", Value);
+  PREPARE_FOR_EXECUTION(context, Object, GetRealNamedPropertyInPrototypeChain,
+                        Value);
   i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
   if (!self->IsJSObject()) return MaybeLocal<Value>();
   i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
@@ -4219,7 +4245,7 @@
 v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
     Local<Context> context, Local<Name> key) {
   PREPARE_FOR_EXECUTION_PRIMITIVE(
-      context, "v8::Object::GetRealNamedPropertyAttributesInPrototypeChain()",
+      context, Object, GetRealNamedPropertyAttributesInPrototypeChain,
       PropertyAttribute);
   i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
   if (!self->IsJSObject()) return Nothing<PropertyAttribute>();
@@ -4249,7 +4275,7 @@
 
 MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
                                                    Local<Name> key) {
-  PREPARE_FOR_EXECUTION(context, "v8::Object::GetRealNamedProperty()", Value);
+  PREPARE_FOR_EXECUTION(context, Object, GetRealNamedProperty, Value);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   i::LookupIterator it = i::LookupIterator::PropertyOrElement(
@@ -4272,8 +4298,7 @@
 Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
     Local<Context> context, Local<Name> key) {
   PREPARE_FOR_EXECUTION_PRIMITIVE(
-      context, "v8::Object::GetRealNamedPropertyAttributes()",
-      PropertyAttribute);
+      context, Object, GetRealNamedPropertyAttributes, PropertyAttribute);
   auto self = Utils::OpenHandle(this);
   auto key_obj = Utils::OpenHandle(*key);
   i::LookupIterator it = i::LookupIterator::PropertyOrElement(
@@ -4322,70 +4347,21 @@
 }
 
 
-bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
-                                v8::Local<v8::Value> value) {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  i::HandleScope scope(isolate);
-  i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
-  if (!self->IsJSObject()) return false;
-  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::Handle<i::String> key_string =
-      isolate->factory()->InternalizeString(key_obj);
-  if (value.IsEmpty()) {
-    i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
-                                      key_string);
-    return true;
-  }
-  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
-  i::Handle<i::Object> result = i::JSObject::SetHiddenProperty(
-      i::Handle<i::JSObject>::cast(self), key_string, value_obj);
-  return *result == *self;
-}
-
-
-v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Local<v8::String> key) {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
-  if (!self->IsJSObject()) return v8::Local<v8::Value>();
-  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::Handle<i::String> key_string =
-      isolate->factory()->InternalizeString(key_obj);
-  i::Handle<i::Object> result(
-      i::Handle<i::JSObject>::cast(self)->GetHiddenProperty(key_string),
-      isolate);
-  if (result->IsTheHole()) return v8::Local<v8::Value>();
-  return Utils::ToLocal(result);
-}
-
-
-bool v8::Object::DeleteHiddenValue(v8::Local<v8::String> key) {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  i::HandleScope scope(isolate);
-  i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
-  if (!self->IsJSObject()) return false;
-  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::Handle<i::String> key_string =
-      isolate->factory()->InternalizeString(key_obj);
-  i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
-                                    key_string);
-  return true;
-}
-
-
 bool v8::Object::IsCallable() {
   auto self = Utils::OpenHandle(this);
   return self->IsCallable();
 }
 
+bool v8::Object::IsConstructor() {
+  auto self = Utils::OpenHandle(this);
+  return self->IsConstructor();
+}
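// A small sketch of the callable/constructor distinction now exposed; it
// assumes obj is a v8::Local<v8::Object>. An arrow function is callable but
// not a constructor, whereas a class is both:
//
//   if (obj->IsCallable() && !obj->IsConstructor()) {
//     // e.g. an arrow function: it can be invoked, but `new` would throw.
//   }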
 
 MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
                                          Local<Value> recv, int argc,
                                          Local<Value> argv[]) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Object::CallAsFunction()",
-                                      Value);
+  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsFunction, Value);
+  i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
@@ -4411,8 +4387,9 @@
 
 MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
                                             Local<Value> argv[]) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context,
-                                      "v8::Object::CallAsConstructor()", Value);
+  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsConstructor,
+                                      Value);
+  i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
@@ -4433,22 +4410,23 @@
   RETURN_TO_LOCAL_UNCHECKED(CallAsConstructor(context, argc, argv_cast), Value);
 }
 
-
 MaybeLocal<Function> Function::New(Local<Context> context,
                                    FunctionCallback callback, Local<Value> data,
-                                   int length) {
+                                   int length, ConstructorBehavior behavior) {
   i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
-  LOG_API(isolate, "Function::New");
+  LOG_API(isolate, Function, New);
   ENTER_V8(isolate);
-  return FunctionTemplateNew(isolate, callback, nullptr, data,
-                             Local<Signature>(), length, true)
-      ->GetFunction(context);
+  auto templ = FunctionTemplateNew(isolate, callback, nullptr, data,
+                                   Local<Signature>(), length, true);
+  if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
+  return templ->GetFunction(context);
 }
 
 
 Local<Function> Function::New(Isolate* v8_isolate, FunctionCallback callback,
                               Local<Value> data, int length) {
-  return Function::New(v8_isolate->GetCurrentContext(), callback, data, length)
+  return Function::New(v8_isolate->GetCurrentContext(), callback, data, length,
+                       ConstructorBehavior::kAllow)
       .FromMaybe(Local<Function>());
 }
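// A minimal sketch of the new ConstructorBehavior parameter; it assumes a
// live Local<Context> context and a FunctionCallback callback. With kThrow
// the function gets no prototype, so `new fn()` throws:
//
//   v8::Local<v8::Function> fn =
//       v8::Function::New(context, callback, v8::Local<v8::Value>(), 0,
//                         v8::ConstructorBehavior::kThrow)
//           .ToLocalChecked();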
 
@@ -4461,8 +4439,8 @@
 
 MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
                                          v8::Local<v8::Value> argv[]) const {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::NewInstance()",
-                                      Object);
+  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, NewInstance, Object);
+  i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
@@ -4486,7 +4464,8 @@
 MaybeLocal<v8::Value> Function::Call(Local<Context> context,
                                      v8::Local<v8::Value> recv, int argc,
                                      v8::Local<v8::Value> argv[]) {
-  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::Call()", Value);
+  PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, Call, Value);
+  i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
@@ -4518,16 +4497,20 @@
 
 Local<Value> Function::GetName() const {
   auto self = Utils::OpenHandle(this);
+  i::Isolate* isolate = self->GetIsolate();
   if (self->IsJSBoundFunction()) {
     auto func = i::Handle<i::JSBoundFunction>::cast(self);
-    return Utils::ToLocal(handle(func->name(), func->GetIsolate()));
+    i::Handle<i::Object> name;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, name,
+                                     i::JSBoundFunction::GetName(isolate, func),
+                                     Local<Value>());
+    return Utils::ToLocal(name);
   }
   if (self->IsJSFunction()) {
     auto func = i::Handle<i::JSFunction>::cast(self);
-    return Utils::ToLocal(handle(func->shared()->name(), func->GetIsolate()));
+    return Utils::ToLocal(handle(func->shared()->name(), isolate));
   }
-  return ToApiHandle<Primitive>(
-      self->GetIsolate()->factory()->undefined_value());
+  return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
 }
 
 
@@ -5195,7 +5178,7 @@
                       int* nchars_ref,
                       int options) const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  LOG_API(isolate, "String::WriteUtf8");
+  LOG_API(isolate, String, WriteUtf8);
   ENTER_V8(isolate);
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (options & HINT_MANY_WRITES_EXPECTED) {
@@ -5248,7 +5231,7 @@
                               int length,
                               int options) {
   i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
-  LOG_API(isolate, "String::Write");
+  LOG_API(isolate, String, Write);
   ENTER_V8(isolate);
   DCHECK(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(string);
@@ -5622,7 +5605,7 @@
                                 v8::Local<ObjectTemplate> global_template,
                                 v8::Local<Value> global_object) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
-  LOG_API(isolate, "Context::New");
+  LOG_API(isolate, Context, New);
   i::HandleScope scope(isolate);
   ExtensionConfiguration no_extensions;
   if (extensions == NULL) extensions = &no_extensions;
@@ -5725,7 +5708,7 @@
 
 
 MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
-  PREPARE_FOR_EXECUTION(context, "v8::ObjectTemplate::NewInstance()", Object);
+  PREPARE_FOR_EXECUTION(context, ObjectTemplate, NewInstance, Object);
   auto self = Utils::OpenHandle(this);
   Local<Object> result;
   has_pending_exception =
@@ -5742,8 +5725,7 @@
 
 
 MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
-  PREPARE_FOR_EXECUTION(context, "v8::FunctionTemplate::GetFunction()",
-                        Function);
+  PREPARE_FOR_EXECUTION(context, FunctionTemplate, GetFunction, Function);
   auto self = Utils::OpenHandle(this);
   Local<Function> result;
   has_pending_exception =
@@ -5769,7 +5751,7 @@
 Local<External> v8::External::New(Isolate* isolate, void* value) {
   STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "External::New");
+  LOG_API(i_isolate, External, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
   return Utils::ExternalToLocal(external);
@@ -5837,42 +5819,42 @@
 
 STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
 
-
-template <typename Char>
-inline MaybeLocal<String> NewString(Isolate* v8_isolate, const char* location,
-                                    const char* env, const Char* data,
-                                    v8::NewStringType type, int length) {
-  i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
-  if (length == 0) return String::Empty(v8_isolate);
-  // TODO(dcarney): throw a context free exception.
-  if (length > i::String::kMaxLength) return MaybeLocal<String>();
-  ENTER_V8(isolate);
-  LOG_API(isolate, env);
-  if (length < 0) length = StringLength(data);
-  i::Handle<i::String> result =
-      NewString(isolate->factory(), type, i::Vector<const Char>(data, length))
-          .ToHandleChecked();
-  return Utils::ToLocal(result);
-}
-
 }  // anonymous namespace
 
+// TODO(dcarney): throw a context free exception.
+#define NEW_STRING(isolate, class_name, function_name, Char, data, type,   \
+                   length)                                                 \
+  MaybeLocal<String> result;                                               \
+  if (length == 0) {                                                       \
+    result = String::Empty(isolate);                                       \
+  } else if (length > i::String::kMaxLength) {                             \
+    result = MaybeLocal<String>();                                         \
+  } else {                                                                 \
+    i::Isolate* i_isolate = reinterpret_cast<internal::Isolate*>(isolate); \
+    ENTER_V8(i_isolate);                                                   \
+    LOG_API(i_isolate, class_name, function_name);                         \
+    if (length < 0) length = StringLength(data);                           \
+    i::Handle<i::String> handle_result =                                   \
+        NewString(i_isolate->factory(), type,                              \
+                  i::Vector<const Char>(data, length))                     \
+            .ToHandleChecked();                                            \
+    result = Utils::ToLocal(handle_result);                                \
+  }
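// Unlike the template helper it replaces, NEW_STRING expands directly in the
// caller's body, so the `result` it declares can feed both the unchecked
// RETURN_TO_LOCAL_UNCHECKED wrappers and the MaybeLocal overloads below.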
 
 Local<String> String::NewFromUtf8(Isolate* isolate,
                                   const char* data,
                                   NewStringType type,
                                   int length) {
-  RETURN_TO_LOCAL_UNCHECKED(
-      NewString(isolate, "v8::String::NewFromUtf8()", "String::NewFromUtf8",
-                data, static_cast<v8::NewStringType>(type), length),
-      String);
+  NEW_STRING(isolate, String, NewFromUtf8, char, data,
+             static_cast<v8::NewStringType>(type), length);
+  RETURN_TO_LOCAL_UNCHECKED(result, String);
 }
 
 
 MaybeLocal<String> String::NewFromUtf8(Isolate* isolate, const char* data,
                                        v8::NewStringType type, int length) {
-  return NewString(isolate, "v8::String::NewFromUtf8()", "String::NewFromUtf8",
-                   data, type, length);
+  NEW_STRING(isolate, String, NewFromUtf8, char, data, type, length);
+  return result;
 }
 
 
@@ -5880,18 +5862,16 @@
                                      const uint8_t* data,
                                      NewStringType type,
                                      int length) {
-  RETURN_TO_LOCAL_UNCHECKED(
-      NewString(isolate, "v8::String::NewFromOneByte()",
-                "String::NewFromOneByte", data,
-                static_cast<v8::NewStringType>(type), length),
-      String);
+  NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data,
+             static_cast<v8::NewStringType>(type), length);
+  RETURN_TO_LOCAL_UNCHECKED(result, String);
 }
 
 
 MaybeLocal<String> String::NewFromOneByte(Isolate* isolate, const uint8_t* data,
                                           v8::NewStringType type, int length) {
-  return NewString(isolate, "v8::String::NewFromOneByte()",
-                   "String::NewFromOneByte", data, type, length);
+  NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data, type, length);
+  return result;
 }
 
 
@@ -5899,19 +5879,17 @@
                                      const uint16_t* data,
                                      NewStringType type,
                                      int length) {
-  RETURN_TO_LOCAL_UNCHECKED(
-      NewString(isolate, "v8::String::NewFromTwoByte()",
-                "String::NewFromTwoByte", data,
-                static_cast<v8::NewStringType>(type), length),
-      String);
+  NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, data,
+             static_cast<v8::NewStringType>(type), length);
+  RETURN_TO_LOCAL_UNCHECKED(result, String);
 }
 
 
 MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
                                           const uint16_t* data,
                                           v8::NewStringType type, int length) {
-  return NewString(isolate, "v8::String::NewFromTwoByte()",
-                   "String::NewFromTwoByte", data, type, length);
+  NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, data, type, length);
+  return result;
 }
 
 
@@ -5919,7 +5897,7 @@
   i::Handle<i::String> left_string = Utils::OpenHandle(*left);
   i::Isolate* isolate = left_string->GetIsolate();
   ENTER_V8(isolate);
-  LOG_API(isolate, "v8::String::Concat");
+  LOG_API(isolate, String, Concat);
   i::Handle<i::String> right_string = Utils::OpenHandle(*right);
   // If we are steering towards a range error, do not wait for the error to be
   // thrown, and return the null handle instead.
@@ -5941,7 +5919,7 @@
   }
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   ENTER_V8(i_isolate);
-  LOG_API(i_isolate, "String::NewExternalTwoByte");
+  LOG_API(i_isolate, String, NewExternalTwoByte);
   i::Handle<i::String> string = i_isolate->factory()
                                     ->NewExternalStringFromTwoByte(resource)
                                     .ToHandleChecked();
@@ -5965,7 +5943,7 @@
   }
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   ENTER_V8(i_isolate);
-  LOG_API(i_isolate, "String::NewExternalOneByte");
+  LOG_API(i_isolate, String, NewExternalOneByte);
   i::Handle<i::String> string = i_isolate->factory()
                                     ->NewExternalStringFromOneByte(resource)
                                     .ToHandleChecked();
@@ -6048,7 +6026,7 @@
 
 Local<v8::Object> v8::Object::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "Object::New");
+  LOG_API(i_isolate, Object, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSObject> obj =
       i_isolate->factory()->NewJSObject(i_isolate->object_function());
@@ -6058,7 +6036,7 @@
 
 Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "NumberObject::New");
+  LOG_API(i_isolate, NumberObject, New);
   ENTER_V8(i_isolate);
   i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
   i::Handle<i::Object> obj =
@@ -6071,14 +6049,14 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
   i::Isolate* isolate = jsvalue->GetIsolate();
-  LOG_API(isolate, "NumberObject::NumberValue");
+  LOG_API(isolate, NumberObject, NumberValue);
   return jsvalue->value()->Number();
 }
 
 
 Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "BooleanObject::New");
+  LOG_API(i_isolate, BooleanObject, New);
   ENTER_V8(i_isolate);
   i::Handle<i::Object> boolean(value ? i_isolate->heap()->true_value()
                                      : i_isolate->heap()->false_value(),
@@ -6098,7 +6076,7 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
   i::Isolate* isolate = jsvalue->GetIsolate();
-  LOG_API(isolate, "BooleanObject::BooleanValue");
+  LOG_API(isolate, BooleanObject, BooleanValue);
   return jsvalue->value()->IsTrue();
 }
 
@@ -6106,7 +6084,7 @@
 Local<v8::Value> v8::StringObject::New(Local<String> value) {
   i::Handle<i::String> string = Utils::OpenHandle(*value);
   i::Isolate* isolate = string->GetIsolate();
-  LOG_API(isolate, "StringObject::New");
+  LOG_API(isolate, StringObject, New);
   ENTER_V8(isolate);
   i::Handle<i::Object> obj =
       i::Object::ToObject(isolate, string).ToHandleChecked();
@@ -6118,7 +6096,7 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
   i::Isolate* isolate = jsvalue->GetIsolate();
-  LOG_API(isolate, "StringObject::StringValue");
+  LOG_API(isolate, StringObject, StringValue);
   return Utils::ToLocal(
       i::Handle<i::String>(i::String::cast(jsvalue->value())));
 }
@@ -6126,7 +6104,7 @@
 
 Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "SymbolObject::New");
+  LOG_API(i_isolate, SymbolObject, New);
   ENTER_V8(i_isolate);
   i::Handle<i::Object> obj = i::Object::ToObject(
       i_isolate, Utils::OpenHandle(*value)).ToHandleChecked();
@@ -6138,7 +6116,7 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
   i::Isolate* isolate = jsvalue->GetIsolate();
-  LOG_API(isolate, "SymbolObject::SymbolValue");
+  LOG_API(isolate, SymbolObject, SymbolValue);
   return Utils::ToLocal(
       i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value())));
 }
@@ -6149,7 +6127,7 @@
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
     time = std::numeric_limits<double>::quiet_NaN();
   }
-  PREPARE_FOR_EXECUTION(context, "Date::New", Value);
+  PREPARE_FOR_EXECUTION(context, Date, New, Value);
   Local<Value> result;
   has_pending_exception = !ToLocal<Value>(
       i::JSDate::New(isolate->date_function(), isolate->date_function(), time),
@@ -6169,14 +6147,14 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
   i::Isolate* isolate = jsdate->GetIsolate();
-  LOG_API(isolate, "Date::NumberValue");
+  LOG_API(isolate, Date, NumberValue);
   return jsdate->value()->Number();
 }
 
 
 void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "Date::DateTimeConfigurationChangeNotification");
+  LOG_API(i_isolate, Date, DateTimeConfigurationChangeNotification);
   ENTER_V8(i_isolate);
   i_isolate->date_cache()->ResetDateCache();
   if (!i_isolate->eternal_handles()->Exists(
@@ -6196,7 +6174,7 @@
 
 MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
                                        Local<String> pattern, Flags flags) {
-  PREPARE_FOR_EXECUTION(context, "RegExp::New", RegExp);
+  PREPARE_FOR_EXECUTION(context, RegExp, New, RegExp);
   Local<v8::RegExp> result;
   has_pending_exception =
       !ToLocal<RegExp>(i::JSRegExp::New(Utils::OpenHandle(*pattern),
@@ -6241,7 +6219,7 @@
 
 Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "Array::New");
+  LOG_API(i_isolate, Array, New);
   ENTER_V8(i_isolate);
   int real_length = length > 0 ? length : 0;
   i::Handle<i::JSArray> obj = i_isolate->factory()->NewJSArray(real_length);
@@ -6265,7 +6243,7 @@
 
 MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
                                          uint32_t index) {
-  PREPARE_FOR_EXECUTION(context, "v8::Array::CloneElementAt()", Object);
+  PREPARE_FOR_EXECUTION(context, Array, CloneElementAt, Object);
   auto self = Utils::OpenHandle(this);
   if (!self->HasFastObjectElements()) return Local<Object>();
   i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -6286,7 +6264,7 @@
 
 Local<v8::Map> v8::Map::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "Map::New");
+  LOG_API(i_isolate, Map, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSMap> obj = i_isolate->factory()->NewJSMap();
   return Utils::ToLocal(obj);
@@ -6302,14 +6280,14 @@
 void Map::Clear() {
   auto self = Utils::OpenHandle(this);
   i::Isolate* isolate = self->GetIsolate();
-  LOG_API(isolate, "Map::Clear");
+  LOG_API(isolate, Map, Clear);
   ENTER_V8(isolate);
   i::JSMap::Clear(self);
 }
 
 
 MaybeLocal<Value> Map::Get(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION(context, "Map::Get", Value);
+  PREPARE_FOR_EXECUTION(context, Map, Get, Value);
   auto self = Utils::OpenHandle(this);
   Local<Value> result;
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6324,7 +6302,7 @@
 
 MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
                          Local<Value> value) {
-  PREPARE_FOR_EXECUTION(context, "Map::Set", Map);
+  PREPARE_FOR_EXECUTION(context, Map, Set, Map);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
@@ -6338,7 +6316,7 @@
 
 
 Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Map::Has", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Has, bool);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6351,7 +6329,7 @@
 
 
 Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Map::Delete", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Delete, bool);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6367,17 +6345,25 @@
   i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
   i::Factory* factory = isolate->factory();
-  LOG_API(isolate, "Map::AsArray");
+  LOG_API(isolate, Map, AsArray);
   ENTER_V8(isolate);
   i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(obj->table()));
-  int size = table->NumberOfElements();
-  int length = size * 2;
+  int length = table->NumberOfElements() * 2;
   i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
-  for (int i = 0; i < size; ++i) {
-    if (table->KeyAt(i)->IsTheHole()) continue;
-    result->set(i * 2, table->KeyAt(i));
-    result->set(i * 2 + 1, table->ValueAt(i));
+  int result_index = 0;
+  {
+    i::DisallowHeapAllocation no_gc;
+    int capacity = table->UsedCapacity();
+    i::Oddball* the_hole = isolate->heap()->the_hole_value();
+    for (int i = 0; i < capacity; ++i) {
+      i::Object* key = table->KeyAt(i);
+      if (key == the_hole) continue;
+      result->set(result_index++, key);
+      result->set(result_index++, table->ValueAt(i));
+    }
   }
+  DCHECK_EQ(result_index, result->length());
+  DCHECK_EQ(result_index, length);
   i::Handle<i::JSArray> result_array =
       factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
   return Utils::ToLocal(result_array);
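// The compacting loops introduced here and in Set::AsArray below walk the
// table's used capacity and skip deleted entries (holes), so the resulting
// array stays dense and complete after deletions; the old code indexed by
// element count and could miss live entries sitting past a hole.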
@@ -6386,7 +6372,7 @@
 
 Local<v8::Set> v8::Set::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "Set::New");
+  LOG_API(i_isolate, Set, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSSet> obj = i_isolate->factory()->NewJSSet();
   return Utils::ToLocal(obj);
@@ -6402,14 +6388,14 @@
 void Set::Clear() {
   auto self = Utils::OpenHandle(this);
   i::Isolate* isolate = self->GetIsolate();
-  LOG_API(isolate, "Set::Clear");
+  LOG_API(isolate, Set, Clear);
   ENTER_V8(isolate);
   i::JSSet::Clear(self);
 }
 
 
 MaybeLocal<Set> Set::Add(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION(context, "Set::Add", Set);
+  PREPARE_FOR_EXECUTION(context, Set, Add, Set);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6422,7 +6408,7 @@
 
 
 Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Set::Has", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Has, bool);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6435,7 +6421,7 @@
 
 
 Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Set::Delete", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Delete, bool);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> result;
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6451,17 +6437,24 @@
   i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
   i::Factory* factory = isolate->factory();
-  LOG_API(isolate, "Set::AsArray");
+  LOG_API(isolate, Set, AsArray);
   ENTER_V8(isolate);
   i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(obj->table()));
   int length = table->NumberOfElements();
   i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
-  for (int i = 0; i < length; ++i) {
-    i::Object* key = table->KeyAt(i);
-    if (!key->IsTheHole()) {
-      result->set(i, key);
+  int result_index = 0;
+  {
+    i::DisallowHeapAllocation no_gc;
+    int capacity = table->UsedCapacity();
+    i::Oddball* the_hole = isolate->heap()->the_hole_value();
+    for (int i = 0; i < capacity; ++i) {
+      i::Object* key = table->KeyAt(i);
+      if (key == the_hole) continue;
+      result->set(result_index++, key);
     }
   }
+  DCHECK_EQ(result_index, result->length());
+  DCHECK_EQ(result_index, length);
   i::Handle<i::JSArray> result_array =
       factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
   return Utils::ToLocal(result_array);
@@ -6469,7 +6462,7 @@
 
 
 MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
-  PREPARE_FOR_EXECUTION(context, "Promise::Resolver::New", Resolver);
+  PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
   i::Handle<i::Object> result;
   has_pending_exception =
       !i::Execution::Call(isolate, isolate->promise_create(),
@@ -6494,7 +6487,7 @@
 
 Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
                                        Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
   has_pending_exception =
@@ -6515,7 +6508,7 @@
 
 Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
                                       Local<Value> value) {
-  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
   has_pending_exception =
@@ -6538,7 +6531,7 @@
 
 MaybeLocal<Promise> DoChain(Value* value, Local<Context> context,
                             Local<Function> handler) {
-  PREPARE_FOR_EXECUTION(context, "Promise::Chain", Promise);
+  PREPARE_FOR_EXECUTION(context, Promise, Chain, Promise);
   auto self = Utils::OpenHandle(value);
   i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
   i::Handle<i::Object> result;
@@ -6566,7 +6559,7 @@
 
 MaybeLocal<Promise> Promise::Catch(Local<Context> context,
                                    Local<Function> handler) {
-  PREPARE_FOR_EXECUTION(context, "Promise::Catch", Promise);
+  PREPARE_FOR_EXECUTION(context, Promise, Catch, Promise);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
   i::Handle<i::Object> result;
@@ -6586,7 +6579,7 @@
 
 MaybeLocal<Promise> Promise::Then(Local<Context> context,
                                   Local<Function> handler) {
-  PREPARE_FOR_EXECUTION(context, "Promise::Then", Promise);
+  PREPARE_FOR_EXECUTION(context, Promise, Then, Promise);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
   i::Handle<i::Object> result;
@@ -6607,7 +6600,7 @@
 bool Promise::HasHandler() {
   i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
   i::Isolate* isolate = promise->GetIsolate();
-  LOG_API(isolate, "Promise::HasRejectHandler");
+  LOG_API(isolate, Promise, HasRejectHandler);
   ENTER_V8(isolate);
   i::Handle<i::Symbol> key = isolate->factory()->promise_has_handler_symbol();
   return i::JSReceiver::GetDataProperty(promise, key)->IsTrue();
@@ -6642,7 +6635,7 @@
 
 MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
                              Local<Object> local_handler) {
-  PREPARE_FOR_EXECUTION(context, "Proxy::New", Proxy);
+  PREPARE_FOR_EXECUTION(context, Proxy, New, Proxy);
   i::Handle<i::JSReceiver> target = Utils::OpenHandle(*local_target);
   i::Handle<i::JSReceiver> handler = Utils::OpenHandle(*local_handler);
   Local<Proxy> result;
@@ -6665,7 +6658,7 @@
 v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
   i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
   i::Isolate* isolate = self->GetIsolate();
-  Utils::ApiCheck(!self->is_external(), "v8::ArrayBuffer::Externalize",
+  Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
                   "ArrayBuffer already externalized");
   self->set_is_external(true);
   isolate->heap()->UnregisterArrayBuffer(*self);
@@ -6692,7 +6685,7 @@
                   "Only externalized ArrayBuffers can be neutered");
   Utils::ApiCheck(obj->is_neuterable(), "v8::ArrayBuffer::Neuter",
                   "Only neuterable ArrayBuffers can be neutered");
-  LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
+  LOG_API(obj->GetIsolate(), ArrayBuffer, Neuter);
   ENTER_V8(isolate);
   obj->Neuter();
 }
@@ -6706,7 +6699,7 @@
 
 Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "v8::ArrayBuffer::New(size_t)");
+  LOG_API(i_isolate, ArrayBuffer, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
@@ -6721,7 +6714,7 @@
   // Embedders must guarantee that the external backing store is valid.
   CHECK(byte_length == 0 || data != NULL);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
+  LOG_API(i_isolate, ArrayBuffer, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
@@ -6794,49 +6787,45 @@
   return static_cast<size_t>(obj->length_value());
 }
 
-
-#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size)                        \
-  Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer,        \
-                                      size_t byte_offset, size_t length) {    \
-    i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate();     \
-    LOG_API(isolate,                                                          \
-            "v8::" #Type "Array::New(Local<ArrayBuffer>, size_t, size_t)");   \
-    ENTER_V8(isolate);                                                        \
-    if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue),    \
-                         "v8::" #Type                                         \
-                         "Array::New(Local<ArrayBuffer>, size_t, size_t)",    \
-                         "length exceeds max allowed value")) {               \
-      return Local<Type##Array>();                                            \
-    }                                                                         \
-    i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);    \
-    i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray(     \
-        i::kExternal##Type##Array, buffer, byte_offset, length);              \
-    return Utils::ToLocal##Type##Array(obj);                                  \
-  }                                                                           \
-  Local<Type##Array> Type##Array::New(                                        \
-      Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset,       \
-      size_t length) {                                                        \
-    CHECK(i::FLAG_harmony_sharedarraybuffer);                                 \
-    i::Isolate* isolate =                                                     \
-        Utils::OpenHandle(*shared_array_buffer)->GetIsolate();                \
-    LOG_API(isolate, "v8::" #Type                                             \
-                     "Array::New(Local<SharedArrayBuffer>, size_t, size_t)"); \
-    ENTER_V8(isolate);                                                        \
-    if (!Utils::ApiCheck(                                                     \
-            length <= static_cast<size_t>(i::Smi::kMaxValue),                 \
-            "v8::" #Type                                                      \
-            "Array::New(Local<SharedArrayBuffer>, size_t, size_t)",           \
-            "length exceeds max allowed value")) {                            \
-      return Local<Type##Array>();                                            \
-    }                                                                         \
-    i::Handle<i::JSArrayBuffer> buffer =                                      \
-        Utils::OpenHandle(*shared_array_buffer);                              \
-    i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray(     \
-        i::kExternal##Type##Array, buffer, byte_offset, length);              \
-    return Utils::ToLocal##Type##Array(obj);                                  \
+#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size)                     \
+  Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer,     \
+                                      size_t byte_offset, size_t length) { \
+    i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate();  \
+    LOG_API(isolate, Type##Array, New);                                    \
+    ENTER_V8(isolate);                                                     \
+    if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
+                         "v8::" #Type                                      \
+                         "Array::New(Local<ArrayBuffer>, size_t, size_t)", \
+                         "length exceeds max allowed value")) {            \
+      return Local<Type##Array>();                                         \
+    }                                                                      \
+    i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); \
+    i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray(  \
+        i::kExternal##Type##Array, buffer, byte_offset, length);           \
+    return Utils::ToLocal##Type##Array(obj);                               \
+  }                                                                        \
+  Local<Type##Array> Type##Array::New(                                     \
+      Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset,    \
+      size_t length) {                                                     \
+    CHECK(i::FLAG_harmony_sharedarraybuffer);                              \
+    i::Isolate* isolate =                                                  \
+        Utils::OpenHandle(*shared_array_buffer)->GetIsolate();             \
+    LOG_API(isolate, Type##Array, New);                                    \
+    ENTER_V8(isolate);                                                     \
+    if (!Utils::ApiCheck(                                                  \
+            length <= static_cast<size_t>(i::Smi::kMaxValue),              \
+            "v8::" #Type                                                   \
+            "Array::New(Local<SharedArrayBuffer>, size_t, size_t)",        \
+            "length exceeds max allowed value")) {                         \
+      return Local<Type##Array>();                                         \
+    }                                                                      \
+    i::Handle<i::JSArrayBuffer> buffer =                                   \
+        Utils::OpenHandle(*shared_array_buffer);                           \
+    i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray(  \
+        i::kExternal##Type##Array, buffer, byte_offset, length);           \
+    return Utils::ToLocal##Type##Array(obj);                               \
   }
 
-
 TYPED_ARRAYS(TYPED_ARRAY_NEW)
 #undef TYPED_ARRAY_NEW
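
TYPED_ARRAY_NEW is instantiated once per element type by the TYPED_ARRAYS X-macro, with Type##Array token pasting building each class and factory name. A minimal self-contained sketch of the same pattern (demo list and names, not V8's):

    #include <cstddef>
    #include <cstdio>

    // X-macro list: each V(...) row stamps out one definition below.
    // The rows are illustrative, not V8's TYPED_ARRAYS list.
    #define DEMO_TYPED_ARRAYS(V) \
      V(Uint8, 1)                \
      V(Float64, 8)

    // Token pasting (Type##Array) builds one factory per element type.
    #define DEMO_TYPED_ARRAY_NEW(Type, size)                       \
      struct Type##Array {                                         \
        static Type##Array New(size_t length) {                    \
          std::printf(#Type "Array::New(%zu), %d bytes/element\n", \
                      length, size);                               \
          return Type##Array{};                                    \
        }                                                          \
      };

    DEMO_TYPED_ARRAYS(DEMO_TYPED_ARRAY_NEW)
    #undef DEMO_TYPED_ARRAY_NEW

    int main() {
      Uint8Array::New(16);
      Float64Array::New(4);
    }
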
 
@@ -6844,7 +6833,7 @@
                               size_t byte_offset, size_t byte_length) {
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
   i::Isolate* isolate = buffer->GetIsolate();
-  LOG_API(isolate, "v8::DataView::New(Local<ArrayBuffer>, size_t, size_t)");
+  LOG_API(isolate, DataView, New);
   ENTER_V8(isolate);
   i::Handle<i::JSDataView> obj =
       isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
@@ -6857,8 +6846,7 @@
   CHECK(i::FLAG_harmony_sharedarraybuffer);
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*shared_array_buffer);
   i::Isolate* isolate = buffer->GetIsolate();
-  LOG_API(isolate,
-          "v8::DataView::New(Local<SharedArrayBuffer>, size_t, size_t)");
+  LOG_API(isolate, DataView, New);
   ENTER_V8(isolate);
   i::Handle<i::JSDataView> obj =
       isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
@@ -6874,7 +6862,7 @@
 v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
   i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
   i::Isolate* isolate = self->GetIsolate();
-  Utils::ApiCheck(!self->is_external(), "v8::SharedArrayBuffer::Externalize",
+  Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
                   "SharedArrayBuffer already externalized");
   self->set_is_external(true);
   isolate->heap()->UnregisterArrayBuffer(*self);
@@ -6902,7 +6890,7 @@
                                                     size_t byte_length) {
   CHECK(i::FLAG_harmony_sharedarraybuffer);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "v8::SharedArrayBuffer::New(size_t)");
+  LOG_API(i_isolate, SharedArrayBuffer, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
@@ -6919,7 +6907,7 @@
   // Embedders must guarantee that the external backing store is valid.
   CHECK(byte_length == 0 || data != NULL);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "v8::SharedArrayBuffer::New(void*, size_t)");
+  LOG_API(i_isolate, SharedArrayBuffer, New);
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
@@ -6932,7 +6920,7 @@
 
 Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "Symbol::New()");
+  LOG_API(i_isolate, Symbol, New);
   ENTER_V8(i_isolate);
   i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
   if (!name.IsEmpty()) result->set_name(*Utils::OpenHandle(*name));
@@ -7005,7 +6993,7 @@
 
 Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, "Private::New()");
+  LOG_API(i_isolate, Private, New);
   ENTER_V8(i_isolate);
   i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
   if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
@@ -7202,7 +7190,7 @@
                                           ObjectSpace space,
                                           AllocationAction action) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->memory_allocator()->AddMemoryAllocationCallback(
+  isolate->heap()->memory_allocator()->AddMemoryAllocationCallback(
       callback, space, action);
 }
 
@@ -7210,8 +7198,7 @@
 void Isolate::RemoveMemoryAllocationCallback(
     MemoryAllocationCallback callback) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->memory_allocator()->RemoveMemoryAllocationCallback(
-      callback);
+  isolate->heap()->memory_allocator()->RemoveMemoryAllocationCallback(callback);
 }
 
 
@@ -7484,10 +7471,25 @@
 void Isolate::GetStackSample(const RegisterState& state, void** frames,
                              size_t frames_limit, SampleInfo* sample_info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  i::TickSample::GetStackSample(isolate, state, i::TickSample::kSkipCEntryFrame,
+#if defined(USE_SIMULATOR)
+  RegisterState regs;
+  regs.pc = state.pc;
+  regs.sp = state.sp;
+  regs.fp = state.fp;
+  i::SimulatorHelper::FillRegisters(isolate, &regs);
+#else
+  const RegisterState& regs = state;
+#endif
+  i::TickSample::GetStackSample(isolate, regs, i::TickSample::kSkipCEntryFrame,
                                 frames, frames_limit, sample_info);
 }
 
+size_t Isolate::NumberOfPhantomHandleResetsSinceLastCall() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  size_t result = isolate->global_handles()->NumberOfPhantomHandleResets();
+  isolate->global_handles()->ResetNumberOfPhantomHandleResets();
+  return result;
+}
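
NumberOfPhantomHandleResetsSinceLastCall reads the counter and immediately clears it, so each call reports only the resets since the previous poll. A minimal sketch of that fetch-and-clear contract (the class is illustrative, not V8's GlobalHandles):

    #include <cassert>
    #include <cstddef>

    // Fetch-and-clear counter: each query returns only the resets that
    // happened since the previous query.
    class PhantomResetCounter {
     public:
      void RecordReset() { ++count_; }
      size_t NumberOfResetsSinceLastCall() {
        size_t result = count_;
        count_ = 0;  // next call reports a fresh delta
        return result;
      }

     private:
      size_t count_ = 0;
    };

    int main() {
      PhantomResetCounter c;
      c.RecordReset();
      c.RecordReset();
      assert(c.NumberOfResetsSinceLastCall() == 2);
      assert(c.NumberOfResetsSinceLastCall() == 0);  // cleared by first poll
    }
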
 
 void Isolate::SetEventLogger(LogEventCallback that) {
   // Do not overwrite the event logger if we want to log explicitly.
@@ -7706,9 +7708,10 @@
 
 void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  if (isolate->code_range()->valid()) {
-    *start = isolate->code_range()->start();
-    *length_in_bytes = isolate->code_range()->size();
+  if (isolate->heap()->memory_allocator()->code_range()->valid()) {
+    *start = isolate->heap()->memory_allocator()->code_range()->start();
+    *length_in_bytes =
+        isolate->heap()->memory_allocator()->code_range()->size();
   } else {
     *start = NULL;
     *length_in_bytes = 0;
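
GetCodeRange now reaches the range through heap()->memory_allocator(), but the public contract is unchanged: it reports the reserved code region, or NULL/0 when none exists. A hedged usage sketch from the embedder side (the PC-classification use case is an assumption, not part of this patch):

    #include <v8.h>

    // Ask V8 for its contiguous code range so an external sampler can
    // test whether a PC belongs to V8-generated code (assumed use case).
    bool PcIsInV8Code(v8::Isolate* isolate, void* pc) {
      void* start = nullptr;
      size_t length = 0;
      isolate->GetCodeRange(&start, &length);
      char* base = static_cast<char*>(start);
      char* p = static_cast<char*>(pc);
      return base != nullptr && p >= base && p < base + length;
    }
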
@@ -7874,6 +7877,10 @@
   return isolate->handle_scope_implementer()->GetMicrotasksScopeDepth();
 }
 
+bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8Isolate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
+  return isolate->IsRunningMicrotasks();
+}
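
The new IsRunningMicrotasks query lets an embedder detect that it is already inside the microtask pump, e.g. to avoid re-entering it. A hedged usage sketch (the guard policy around RunMicrotasks is an assumption):

    #include <v8.h>

    // Only pump microtasks if we are not already inside the pump
    // (re-entering would be an embedder bug).
    void MaybeRunMicrotasks(v8::Isolate* isolate) {
      if (v8::MicrotasksScope::IsRunningMicrotasks(isolate)) return;
      isolate->RunMicrotasks();
    }
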
 
 String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
     : str_(NULL), length_(0) {
@@ -7918,11 +7925,10 @@
   i::DeleteArray(str_);
 }
 
-
 #define DEFINE_ERROR(NAME, name)                                         \
   Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) {      \
     i::Isolate* isolate = i::Isolate::Current();                         \
-    LOG_API(isolate, #NAME);                                             \
+    LOG_API(isolate, NAME, New);                                         \
     ENTER_V8(isolate);                                                   \
     i::Object* error;                                                    \
     {                                                                    \
@@ -8038,7 +8044,7 @@
 MaybeLocal<Value> Debug::Call(Local<Context> context,
                               v8::Local<v8::Function> fun,
                               v8::Local<v8::Value> data) {
-  PREPARE_FOR_EXECUTION(context, "v8::Debug::Call()", Value);
+  PREPARE_FOR_EXECUTION(context, Debug, Call, Value);
   i::Handle<i::Object> data_obj;
   if (data.IsEmpty()) {
     data_obj = isolate->factory()->undefined_value();
@@ -8063,7 +8069,7 @@
 
 MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
                                    v8::Local<v8::Value> obj) {
-  PREPARE_FOR_EXECUTION(context, "v8::Debug::GetMirror()", Value);
+  PREPARE_FOR_EXECUTION(context, Debug, GetMirror, Value);
   i::Debug* isolate_debug = isolate->debug();
   has_pending_exception = !isolate_debug->Load();
   RETURN_ON_FAILED_EXECUTION(Value);
@@ -8309,7 +8315,9 @@
 
 
 void CpuProfiler::SetIdle(bool is_idle) {
-  i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
+  i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(this);
+  if (!profiler->is_profiling()) return;
+  i::Isolate* isolate = profiler->isolate();
   v8::StateTag state = isolate->current_vm_state();
   DCHECK(state == v8::EXTERNAL || state == v8::IDLE);
   if (isolate->js_entry_sp() != NULL) return;
@@ -8521,11 +8529,11 @@
   return heap_profiler->PushHeapObjectsStats(stream, timestamp_us);
 }
 
-
 bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
-                                             int stack_depth) {
-  return reinterpret_cast<i::HeapProfiler*>(this)
-      ->StartSamplingHeapProfiler(sample_interval, stack_depth);
+                                             int stack_depth,
+                                             SamplingFlags flags) {
+  return reinterpret_cast<i::HeapProfiler*>(this)->StartSamplingHeapProfiler(
+      sample_interval, stack_depth, flags);
 }
 
 
@@ -8781,6 +8789,8 @@
     v8::AccessorNameGetterCallback getter) {
   // Leaving JavaScript.
   Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+  RuntimeCallTimerScope timer(isolate,
+                              &RuntimeCallStats::AccessorGetterCallback);
   Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
       getter));
   VMState<EXTERNAL> state(isolate);
@@ -8792,6 +8802,8 @@
 void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
                             v8::FunctionCallback callback) {
   Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+  RuntimeCallTimerScope timer(isolate,
+                              &RuntimeCallStats::InvokeFunctionCallback);
   Address callback_address =
       reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
   VMState<EXTERNAL> state(isolate);
diff --git a/src/arguments.h b/src/arguments.h
index 02090f9..53cea46 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -79,22 +79,29 @@
 #define CLOBBER_DOUBLE_REGISTERS()
 #endif
 
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                          \
-  static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate));  \
-  Type Name(int args_length, Object** args_object, Isolate* isolate) {     \
-    CLOBBER_DOUBLE_REGISTERS();                                            \
-    Type value;                                                            \
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8." #Name);    \
-    Arguments args(args_length, args_object);                              \
-    if (FLAG_runtime_call_stats) {                                         \
-      RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
-      RuntimeCallTimerScope timer(isolate, &stats->Name);                  \
-      value = __RT_impl_##Name(args, isolate);                             \
-    } else {                                                               \
-      value = __RT_impl_##Name(args, isolate);                             \
-    }                                                                      \
-    return value;                                                          \
-  }                                                                        \
+// TODO(cbruni): add global flag to check whether any tracing events have been
+// enabled.
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                             \
+  static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate));     \
+                                                                              \
+  V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
+                                       Isolate* isolate) {                    \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name);            \
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                     \
+                 "V8.Runtime_" #Name);                                        \
+    Arguments args(args_length, args_object);                                 \
+    return __RT_impl_##Name(args, isolate);                                   \
+  }                                                                           \
+                                                                              \
+  Type Name(int args_length, Object** args_object, Isolate* isolate) {        \
+    CLOBBER_DOUBLE_REGISTERS();                                               \
+    if (FLAG_runtime_call_stats) {                                            \
+      return Stats_##Name(args_length, args_object, isolate);                 \
+    }                                                                         \
+    Arguments args(args_length, args_object);                                 \
+    return __RT_impl_##Name(args, isolate);                                   \
+  }                                                                           \
+                                                                              \
   static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
 
 #define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
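
The rewritten macro splits each runtime function into a slim dispatcher and a V8_NOINLINE Stats_##Name wrapper, so the timer and trace machinery stay out of line and the stats-off fast path remains small enough to inline. A self-contained sketch of the same shape (demo macro and flag, not V8's):

    #include <cstdio>

    static bool FLAG_runtime_call_stats = false;

    #if defined(__GNUC__) || defined(__clang__)
    #define DEMO_NOINLINE __attribute__((noinline))
    #else
    #define DEMO_NOINLINE
    #endif

    // The instrumented path lives in a separate noinline function; the
    // dispatcher only tests the flag and calls one of the two paths.
    #define DEMO_RUNTIME_FUNCTION(Name)                          \
      static int Impl_##Name(int arg);                           \
                                                                 \
      DEMO_NOINLINE static int Stats_##Name(int arg) {           \
        std::puts("timing " #Name); /* stand-in for the timer */ \
        return Impl_##Name(arg);                                 \
      }                                                          \
                                                                 \
      int Name(int arg) {                                        \
        if (FLAG_runtime_call_stats) return Stats_##Name(arg);   \
        return Impl_##Name(arg); /* fast path */                 \
      }                                                          \
                                                                 \
      static int Impl_##Name(int arg)

    DEMO_RUNTIME_FUNCTION(Runtime_Add) { return arg + 1; }

    int main() {
      Runtime_Add(1);  // fast path, no logging
      FLAG_runtime_call_stats = true;
      Runtime_Add(2);  // out-of-line, timed path
    }
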
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index b0b22b6..52ebe32 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -71,11 +71,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Assembler::target_address_at(pc_, host_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
@@ -118,19 +113,6 @@
   }
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
-                                   icache_flush_mode);
-}
-
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -276,7 +258,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 62516e8..1ccc3a6 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -141,15 +141,6 @@
   }
 
   if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
-
-  if (cpu.implementer() == base::CPU::NVIDIA &&
-      cpu.variant() == base::CPU::NVIDIA_DENVER &&
-      cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
-    // TODO(jkummerow): This is turned off as an experiment to see if it
-    // affects crash rates. Keep an eye on crash reports and either remove
-    // coherent cache support permanently, or re-enable it!
-    // supported_ |= 1u << COHERENT_CACHE;
-  }
 #endif
 
   DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
@@ -212,18 +203,14 @@
 
 void CpuFeatures::PrintFeatures() {
   printf(
-    "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
-    "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
-    CpuFeatures::IsSupported(ARMv8),
-    CpuFeatures::IsSupported(ARMv7),
-    CpuFeatures::IsSupported(VFP3),
-    CpuFeatures::IsSupported(VFP32DREGS),
-    CpuFeatures::IsSupported(NEON),
-    CpuFeatures::IsSupported(SUDIV),
-    CpuFeatures::IsSupported(MLS),
-    CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
-    CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
-    CpuFeatures::IsSupported(COHERENT_CACHE));
+      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
+      "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d",
+      CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
+      CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
+      CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
+      CpuFeatures::IsSupported(MLS),
+      CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
+      CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
 #ifdef __arm__
   bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
 #elif USE_EABI_HARDFLOAT
@@ -255,6 +242,42 @@
   return Assembler::is_constant_pool_load(pc_);
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_memory_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_memory_reference &&
+           updated_memory_reference < new_base + new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
+        icache_flush_mode);
+  } else {
+    UNREACHABLE();
+  }
+}
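
Relocation keeps each embedded pointer's offset from the memory base (and each embedded size's delta from the full size) invariant, with the DCHECKs bounding both sides. The arithmetic in isolation (assert-checked sketch, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Rebase an embedded pointer when the wasm heap moves: the offset
    // from the base stays constant (IsWasmMemoryReference branch).
    uintptr_t RebaseReference(uintptr_t ref, uintptr_t old_base,
                              uintptr_t new_base, uint32_t old_size,
                              uint32_t new_size) {
      assert(old_base <= ref && ref < old_base + old_size);
      uintptr_t updated = new_base + (ref - old_base);
      assert(new_base <= updated && updated < new_base + new_size);
      return updated;
    }

    // Rebase an embedded size: the delta to the full size stays
    // constant (IsWasmMemorySizeReference branch).
    uint32_t RebaseSize(uint32_t size_ref, uint32_t old_size,
                        uint32_t new_size) {
      assert(size_ref <= old_size);
      uint32_t updated = new_size + (size_ref - old_size);
      assert(updated <= new_size);
      return updated;
    }

    int main() {
      // Heap of 0x100 bytes at 0x1000 grows to 0x200 bytes at 0x2000.
      assert(RebaseReference(0x1040, 0x1000, 0x2000, 0x100, 0x200) == 0x2040);
      assert(RebaseSize(0x100, 0x100, 0x200) == 0x200);
    }
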
 
 // -----------------------------------------------------------------------------
 // Implementation of Operand and MemOperand
@@ -1702,8 +1725,6 @@
                      int satpos,
                      const Operand& src,
                      Condition cond) {
-  // v6 and above.
-  DCHECK(CpuFeatures::IsSupported(ARMv7));
   DCHECK(!dst.is(pc) && !src.rm_.is(pc));
   DCHECK((satpos >= 0) && (satpos <= 31));
   DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -2038,7 +2059,6 @@
 
 void Assembler::ldrd(Register dst1, Register dst2,
                      const MemOperand& src, Condition cond) {
-  DCHECK(IsEnabled(ARMv7));
   DCHECK(src.rm().is(no_reg));
   DCHECK(!dst1.is(lr));  // r14.
   DCHECK_EQ(0, dst1.code() % 2);
@@ -2053,7 +2073,6 @@
   DCHECK(!src1.is(lr));  // r14.
   DCHECK_EQ(0, src1.code() % 2);
   DCHECK_EQ(src1.code() + 1, src2.code());
-  DCHECK(IsEnabled(ARMv7));
   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
@@ -3371,6 +3390,69 @@
        0x5 * B9 | B6);
 }
 
+void Assembler::vsel(Condition cond, const DwVfpRegister dst,
+                     const DwVfpRegister src1, const DwVfpRegister src2) {
+  // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
+  // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
+  // 0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  int sz = 1;
+
+  // VSEL has a special (restricted) condition encoding.
+  //   eq(0b0000)... -> 0b00
+  //   ge(0b1010)... -> 0b10
+  //   gt(0b1100)... -> 0b11
+  //   vs(0b0110)... -> 0b01
+  // No other conditions are supported.
+  int vsel_cond = (cond >> 30) & 0x3;
+  if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
+    // We can implement some other conditions by swapping the inputs.
+    DCHECK((cond == ne) || (cond == lt) || (cond == le) || (cond == vc));
+    std::swap(vn, vm);
+    std::swap(n, m);
+  }
+
+  emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
+       vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
+}
+
+void Assembler::vsel(Condition cond, const SwVfpRegister dst,
+                     const SwVfpRegister src1, const SwVfpRegister src2) {
+  // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
+  // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
+  // 0(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  int sz = 0;
+
+  // VSEL has a special (restricted) condition encoding.
+  //   eq(0b0000)... -> 0b00
+  //   ge(0b1010)... -> 0b10
+  //   gt(0b1100)... -> 0b11
+  //   vs(0b0110)... -> 0b01
+  // No other conditions are supported.
+  int vsel_cond = (cond >> 30) & 0x3;
+  if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
+    // We can implement some other conditions by swapping the inputs.
+    DCHECK((cond == ne) || (cond == lt) || (cond == le) || (cond == vc));
+    std::swap(vn, vm);
+    std::swap(n, m);
+  }
+
+  emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
+       vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
+}
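
The restricted VSEL encoding works because the two encoded bits are just the top two bits of the four-bit ARM condition, and each unsupported condition is the inverse of a supported one with the same top bits, so swapping the inputs is enough. A sketch verifying the mapping (raw 4-bit encodings; V8 keeps the condition in bits 31-28, hence the (cond >> 30) & 0x3 above):

    #include <cassert>

    // Four-bit ARM condition encodings (bits 31-28 of an instruction).
    enum Cond { eq = 0x0, ne = 0x1, vs = 0x6, vc = 0x7,
                ge = 0xA, lt = 0xB, gt = 0xC, le = 0xD };

    // VSEL's two condition bits are the top two bits of the condition.
    int VselCond(Cond cond) { return (cond >> 2) & 0x3; }

    int main() {
      // Directly encodable conditions.
      assert(VselCond(eq) == 0x0);
      assert(VselCond(vs) == 0x1);
      assert(VselCond(ge) == 0x2);
      assert(VselCond(gt) == 0x3);
      // Each inverse shares the same two bits; the assembler handles
      // these by swapping src1/src2 instead of changing the encoding.
      assert(VselCond(ne) == VselCond(eq));
      assert(VselCond(vc) == VselCond(vs));
      assert(VselCond(lt) == VselCond(ge));
      assert(VselCond(le) == VselCond(gt));
    }
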
 
 void Assembler::vsqrt(const DwVfpRegister dst,
                       const DwVfpRegister src,
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 08ad64c..26e062b 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -57,6 +57,12 @@
 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
   V(r0)  V(r1)  V(r2)  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)  V(r8)
 
+#define FLOAT_REGISTERS(V)                                \
+  V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  \
+  V(s8)  V(s9)  V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
+  V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
+  V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
+
 #define DOUBLE_REGISTERS(V)                               \
   V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
   V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
@@ -154,6 +160,10 @@
     DCHECK(is_valid());
     return 1 << reg_code;
   }
+  static SwVfpRegister from_code(int code) {
+    SwVfpRegister r = {code};
+    return r;
+  }
   void split_code(int* vm, int* m) const {
     DCHECK(is_valid());
     *m = reg_code & 0x1;
@@ -163,9 +173,10 @@
   int reg_code;
 };
 
+typedef SwVfpRegister FloatRegister;
 
 // Double word VFP register.
-struct DoubleRegister {
+struct DwVfpRegister {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
     DOUBLE_REGISTERS(REGISTER_CODE)
@@ -187,7 +198,7 @@
   const char* ToString();
   bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
-  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+  bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
     DCHECK(is_valid());
     return reg_code;
@@ -197,8 +208,8 @@
     return 1 << reg_code;
   }
 
-  static DoubleRegister from_code(int code) {
-    DoubleRegister r = {code};
+  static DwVfpRegister from_code(int code) {
+    DwVfpRegister r = {code};
     return r;
   }
   void split_code(int* vm, int* m) const {
@@ -211,7 +222,7 @@
 };
 
 
-typedef DoubleRegister DwVfpRegister;
+typedef DwVfpRegister DoubleRegister;
 
 
 // Double word VFP register d0-15.
@@ -1225,6 +1236,17 @@
             const Condition cond = al);
   void vcmp(const SwVfpRegister src1, const float src2,
             const Condition cond = al);
+
+  // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
+  void vsel(const Condition cond,
+            const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2);
+  void vsel(const Condition cond,
+            const SwVfpRegister dst,
+            const SwVfpRegister src1,
+            const SwVfpRegister src2);
+
   void vsqrt(const DwVfpRegister dst,
              const DwVfpRegister src,
              const Condition cond = al);
@@ -1357,7 +1379,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   // Record the emission of a constant pool.
   //
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 1fffcb6..031b483 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -604,16 +604,9 @@
     // r0: number of arguments
     // r1: constructor function
     // r3: new target
-    if (is_api_function) {
-      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(r0);
-      __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+    ParameterCount actual(r0);
+    __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -704,6 +697,140 @@
   Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the value to pass to the generator
+  //  -- r1 : the JSGeneratorObject to resume
+  //  -- r2 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(r1);
+
+  // Store input value into generator object.
+  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
+  __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r0, r3,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
+  __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ mov(ip, Operand(step_in_enabled));
+  __ ldrb(ip, MemOperand(ip));
+  __ cmp(ip, Operand(0));
+  __ b(eq, &skip_flooding);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r2, r4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r1, r2);
+    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Push receiver.
+  __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+  __ Push(ip);
+
+  // ----------- S t a t e -------------
+  //  -- r1    : the JSGeneratorObject to resume
+  //  -- r2    : the resume mode (tagged)
+  //  -- r4    : generator function
+  //  -- cp    : generator context
+  //  -- lr    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to the generator function. Since the parser
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3,
+         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+    __ b(mi, &done_loop);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ b(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
+  __ b(ne, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r0,
+         FieldMemOperand(r0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ SmiUntag(r0);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(r3, r1);
+    __ Move(r1, r4);
+    __ ldr(r5, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    __ Jump(r5);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    DCHECK(!FLAG_enable_embedded_constant_pool);
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(lr, fp);
+    __ Move(fp, sp);
+    __ Push(cp, r4);
+
+    // Restore the operand stack.
+    __ ldr(r0, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+    __ ldr(r3, FieldMemOperand(r0, FixedArray::kLengthOffset));
+    __ add(r0, r0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ add(r3, r0, Operand(r3, LSL, kPointerSizeLog2 - 1));
+    {
+      Label done_loop, loop;
+      __ bind(&loop);
+      __ cmp(r0, r3);
+      __ b(eq, &done_loop);
+      __ ldr(ip, MemOperand(r0, kPointerSize, PostIndex));
+      __ Push(ip);
+      __ b(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+    __ str(ip, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+    __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+    __ add(r3, r3, Operand(r2, ASR, 1));
+    __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+    __ Move(r0, r1);  // Continuation expects generator object in r0.
+    __ Jump(r3);
+  }
+}
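
The trampoline stores the input value and resume mode on the generator, pushes hole padding for the formal parameters, then dispatches on whether the function's data is a BytecodeArray: new-style generators re-enter their code entry with the generator smuggled through new.target, while old-style ones rebuild a full-codegen frame and jump to the stored continuation. A behavioral model of that dispatch (hypothetical structs, not V8's object layouts):

    #include <cstdio>

    // Illustrative layouts only; V8's real objects look nothing like this.
    struct Function {
      bool has_bytecode;  // is the function data a BytecodeArray?
    };
    struct Generator {
      Function* function;
      int input;
      int resume_mode;
      int continuation;  // suspend point used by the full-codegen path
    };

    void Resume(Generator* gen, int value, int mode) {
      gen->input = value;       // store the input value
      gen->resume_mode = mode;  // store the resume mode
      if (gen->function->has_bytecode) {
        // New-style: enter the code entry; the generator rides in the
        // new.target slot since generators are non-constructable.
        std::puts("enter Ignition code entry, generator in new.target");
      } else {
        // Old-style: rebuild the frame, restore the operand stack, and
        // jump to the stored continuation offset.
        std::printf("full-codegen resume at continuation %d\n",
                    gen->continuation);
      }
    }

    int main() {
      Function f{true};
      Generator g{&f, 0, 0, 42};
      Resume(&g, 7, /*mode=*/0);
    }
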
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
@@ -832,7 +959,6 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
-
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -850,14 +976,16 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(r1);
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into kInterpreterBytecodeRegister.
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   Register debug_info = kInterpreterBytecodeArrayRegister;
   DCHECK(!debug_info.is(r0));
@@ -869,8 +997,12 @@
   __ ldr(kInterpreterBytecodeArrayRegister,
          FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex), ne);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ CompareRoot(kInterpreterBytecodeArrayRegister,
+                 Heap::kUndefinedValueRootIndex);
+  __ b(eq, &bytecode_array_not_present);
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ SmiTst(kInterpreterBytecodeArrayRegister);
     __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
@@ -878,8 +1010,12 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
-  // Push new.target, bytecode array and zero for bytecode array offset.
-  __ mov(r0, Operand(0));
+  // Load the initial bytecode offset.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
   __ Push(r3, kInterpreterBytecodeArrayRegister, r0);
 
   // Allocate the local and temporary register file on the stack.
@@ -911,18 +1047,8 @@
     __ b(&loop_header, ge);
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load accumulator, register file, bytecode offset, dispatch table into
-  // registers.
+  // Load accumulator and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ add(kInterpreterRegisterFileRegister, fp,
-         Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
@@ -932,35 +1058,33 @@
                          kInterpreterBytecodeOffsetRegister));
   __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
                         kPointerSizeLog2));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(ip);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
-}
+  // The return value is in r0.
 
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in r0.
+  // Get the arguments + receiver count.
+  __ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ldr(r2, FieldMemOperand(r2, BytecodeArray::kParameterSizeOffset));
 
   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
 
-  // Drop receiver + arguments and return.
-  __ ldr(ip, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                             BytecodeArray::kParameterSizeOffset));
-  __ add(sp, sp, ip, LeaveCC);
+  __ add(sp, sp, r2, LeaveCC);
   __ Jump(lr);
-}
 
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(r1, r4, r5);
+  __ Jump(r4);
+}
 
 static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
                                          Register limit, Register scratch) {
@@ -974,7 +1098,6 @@
   __ b(gt, &loop_header);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -1000,7 +1123,6 @@
           RelocInfo::CODE_TARGET);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -1025,25 +1147,24 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(r2, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
+                         Code::kHeaderSize - kHeapObjectTag));
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register and dispatch table register.
-  __ add(kInterpreterRegisterFileRegister, fp,
-         Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Initialize the dispatch table register.
   __ mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
 
-  // Get the context from the frame.
-  __ ldr(kContextRegister,
-         MemOperand(kInterpreterRegisterFileRegister,
-                    InterpreterFrameConstants::kContextFromRegisterPointer));
-
   // Get the bytecode array pointer from the frame.
-  __ ldr(
-      kInterpreterBytecodeArrayRegister,
-      MemOperand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+  __ ldr(kInterpreterBytecodeArrayRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1056,9 +1177,7 @@
 
   // Get the target bytecode offset from the frame.
   __ ldr(kInterpreterBytecodeOffsetRegister,
-         MemOperand(
-             kInterpreterRegisterFileRegister,
-             InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
   // Dispatch to the target bytecode.
@@ -1066,63 +1185,169 @@
                          kInterpreterBytecodeOffsetRegister));
   __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
                         kPointerSizeLog2));
-  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ mov(pc, ip);
 }
 
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(r1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and and pop the
-  // accumulator value into the accumulator register.
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : argument count (preserved for callee)
+  //  -- r3 : new target (preserved for callee)
+  //  -- r1 : target function (preserved for callee)
+  // -----------------------------------
+  // First, look up code; maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register argument_count = r0;
+  Register closure = r1;
+  Register new_target = r3;
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = argument_count;
+  Register index = r2;
+  __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(map,
+         FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ cmp(index, Operand(Smi::FromInt(2)));
+  __ b(lt, &gotta_call_runtime);
+
+  // Find literals.
+  // r3  : native context
+  // r2  : length / index
+  // r0  : optimized code map
+  // stack[0] : new target
+  // stack[4] : closure
+  Register native_context = r3;
+  __ ldr(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r1;
+  Register array_pointer = r5;
+
+  // Does the native context match?
+  __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
+  __ ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousContext));
+  __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ b(ne, &loop_bottom);
+  // OSR id set to none?
+  __ ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
+  __ b(ne, &loop_bottom);
+  // Literals available?
+  __ ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ ldr(r4, MemOperand(sp, 0));
+  __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+  __ push(index);
+  __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = r4;
+  __ ldr(entry,
+         FieldMemOperand(array_pointer,
+                         SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, r5);
+
+  // Link the closure into the optimized function list.
+  // r4 : code entry
+  // r3 : native context
+  // r1 : closure
+  __ ldr(r5,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ str(closure,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mov(r5, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ mov(closure, r5);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ Jump(entry);
+
+  __ bind(&loop_bottom);
+  __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(index, Operand(Smi::FromInt(1)));
+  __ b(gt, &loop_top);
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                         SharedFunctionInfo::kSharedCodeIndex));
+  __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ ldr(entry,
+         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ and_(r5, r5, Operand(Code::KindField::kMask));
+  __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
+  __ cmp(r5, Operand(Code::BUILTIN));
+  __ b(eq, &gotta_call_runtime_no_stack);
+  // Yes, install the full code.
+  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, r5);
+  __ Jump(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
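
The new Generate_CompileLazy is a hand-written search of the SharedFunctionInfo's optimized code map before falling back to the runtime. As hedged C++ pseudocode (the Entry type and its accessors are assumptions following the assembly comments, not V8's real API):

    // Sketch of the optimized-code-map walk above. Entries are scanned from
    // the end, kEntryLength fields at a time, while the index stays > 1.
    enum class Lookup { kInstallCached, kCheckSharedCode, kCallRuntime };

    Lookup FindCachedCode(Entry* map, int length, void* native_context) {
      for (int i = length; i > 1; i -= SharedFunctionInfo::kEntryLength) {
        Entry& e = map[i];
        if (e.context() != native_context) continue;        // loop_bottom
        if (e.osr_ast_id() != BailoutId::None()) continue;  // loop_bottom
        if (e.literals_cleared()) return Lookup::kCallRuntime;
        // Literals found: stored into the closure with a write barrier.
        if (e.code_cleared()) return Lookup::kCheckSharedCode;
        return Lookup::kInstallCached;  // install code entry and tail-call
      }
      return Lookup::kCallRuntime;
    }
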
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
@@ -1252,14 +1477,17 @@
   __ SmiUntag(r6);
   // Switch on the state.
   Label with_tos_register, unknown_state;
-  __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ cmp(r6,
+         Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
   __ b(ne, &with_tos_register);
   __ add(sp, sp, Operand(1 * kPointerSize));  // Remove state.
   __ Ret();
 
   __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
   __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
-  __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
+  __ cmp(r6,
+         Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
   __ b(ne, &unknown_state);
   __ add(sp, sp, Operand(2 * kPointerSize));  // Remove state.
   __ Ret();
@@ -1474,28 +1702,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0    : argc
-  //  -- sp[0] : first argument (left-hand side)
-  //  -- sp[4] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ ldr(InstanceOfDescriptor::LeftRegister(),
-           MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
-    __ ldr(InstanceOfDescriptor::RightRegister(),
-           MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ Ret(2);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
@@ -2387,6 +2593,30 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r1);
+  __ Push(r1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r1);
+  __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(r1, r2);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
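
Both builtins Smi-tag the requested size because runtime entry points take tagged arguments, and load a zero Smi into cp since there is no JavaScript context at this point. On 32-bit targets a Smi is the integer shifted left one bit with a zero tag, so tagging and untagging are single shifts (sketch of the convention, assuming the usual 32-bit V8 layout):

    // 32-bit Smi convention (assumes kSmiTagSize == 1, kSmiShiftSize == 0;
    // illustrative, ignoring signed-shift pedantry).
    static inline int32_t SmiTag(int32_t value) { return value << 1; }
    static inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }
    // Heap pointers, by contrast, carry kHeapObjectTag (== 1) in the low bit.
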
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 31e3e95..0224f9d 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -55,12 +55,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -73,11 +67,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1072,8 +1061,6 @@
   }
   // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
 
-  __ VFPEnsureFPSCRState(r3);
-
   // Check result for exception sentinel.
   Label exception_returned;
   __ CompareRoot(r0, Heap::kExceptionRootIndex);
@@ -1183,7 +1170,6 @@
   __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
   // Set up the reserved register for 0.0.
   __ vmov(kDoubleRegZero, 0.0);
-  __ VFPEnsureFPSCRState(r4);
 
   // Get address of argv, see stm above.
   // r0: code entry
@@ -1333,126 +1319,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = r1;              // Object (lhs).
-  Register const function = r0;            // Function (rhs).
-  Register const object_map = r2;          // Map of {object}.
-  Register const function_map = r3;        // Map of {function}.
-  Register const function_prototype = r4;  // Prototype of {function}.
-  Register const scratch = r5;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ b(ne, &fast_case);
-  __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-  __ b(ne, &fast_case);
-  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
-  __ b(ne, &slow_case);
-  __ LoadRoot(r0, Heap::kFalseValueRootIndex);
-  __ Ret();
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
-  __ b(ne, &slow_case);
-
-  // Go to the runtime if the function is not a constructor.
-  __ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
-  __ tst(scratch, Operand(1 << Map::kIsConstructor));
-  __ b(eq, &slow_case);
-
-  // Ensure that {function} has an instance prototype.
-  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  __ b(ne, &slow_case);
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ ldr(function_prototype,
-         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
-  __ b(ne, &function_prototype_valid);
-  __ ldr(function_prototype,
-         FieldMemOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Register const object_instance_type = function_map;
-  Register const map_bit_field = function_map;
-  Register const null = scratch;
-  Register const result = r0;
-
-  Label done, loop, fast_runtime_fallback;
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ LoadRoot(null, Heap::kNullValueRootIndex);
-  __ bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
-  __ tst(map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ b(ne, &fast_runtime_fallback);
-  // Check if the current object is a Proxy.
-  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
-  __ b(eq, &fast_runtime_fallback);
-
-  __ ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object, function_prototype);
-  __ b(eq, &done);
-  __ cmp(object, null);
-  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ b(ne, &loop);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-  __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // Found Proxy or access check needed: Call the runtime
-  __ bind(&fast_runtime_fallback);
-  __ Push(object, function_prototype);
-  // Invalidate the instanceof cache.
-  __ Move(scratch, Smi::FromInt(0));
-  __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ Push(object, function);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
@@ -3227,7 +3093,6 @@
   // GC safe. The RegExp backend also relies on this.
   __ str(lr, MemOperand(sp, 0));
   __ blx(ip);  // Call the C++ function.
-  __ VFPEnsureFPSCRState(r2);
   __ ldr(pc, MemOperand(sp, 0));
 }
 
@@ -3839,8 +3704,8 @@
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ b(ne, &miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, feedback,
                                                receiver_map, scratch1, r9);
@@ -3984,8 +3849,8 @@
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ b(ne, &miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
       scratch1, scratch2);
@@ -4562,15 +4427,15 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ str(r2, MemOperand(r0, JSObject::kMapOffset));
+  __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
   __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
-  __ str(r3, MemOperand(r0, JSObject::kPropertiesOffset));
-  __ str(r3, MemOperand(r0, JSObject::kElementsOffset));
+  __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ add(r1, r0, Operand(JSObject::kHeaderSize));
+  __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
 
   // ----------- S t a t e -------------
-  //  -- r0 : result (untagged)
+  //  -- r0 : result (tagged)
   //  -- r1 : result fields (untagged)
   //  -- r5 : result end (untagged)
   //  -- r2 : initial map
@@ -4588,10 +4453,6 @@
   {
     // Initialize all in-object fields with undefined.
     __ InitializeFieldsWithFiller(r1, r5, r6);
-
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ add(r0, r0, Operand(kHeapObjectTag));
     __ Ret();
   }
   __ bind(&slack_tracking);
@@ -4610,10 +4471,6 @@
     __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
     __ InitializeFieldsWithFiller(r1, r5, r6);
 
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ add(r0, r0, Operand(kHeapObjectTag));
-
     // Check if we can finalize the instance size.
     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
     __ tst(r3, Operand(Map::ConstructionCounter::kMask));
@@ -4640,10 +4497,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(r2);
   }
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ sub(r0, r0, Operand(kHeapObjectTag));
   __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
   __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ sub(r5, r5, Operand(kHeapObjectTag));
   __ b(&done_allocate);
 
   // Fall back to %NewObject.
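
These hunks are the heart of the TAG_OBJECT removal: Allocate now always returns a tagged pointer, so the stores switch from MemOperand to FieldMemOperand (which compensates for the tag) and the explicit re-tagging adds disappear; only the untagged result end in r5 still needs an explicit sub. The addressing identity involved, as a sketch:

    // FieldMemOperand(obj, offset) addresses obj + offset - kHeapObjectTag,
    // so a tagged pointer is dereferenced without untagging first (sketch):
    inline uintptr_t FieldAddress(uintptr_t tagged_obj, int offset) {
      return tagged_obj + offset - 1;  // kHeapObjectTag == 1
    }
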
@@ -4662,20 +4519,20 @@
   // -----------------------------------
   __ AssertFunction(r1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make r2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mov(r2, fp);
-    __ b(&loop_entry);
-    __ bind(&loop);
+  // Make r2 point to the JavaScript frame.
+  __ mov(r2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r1);
-    __ b(ne, &loop);
+    __ b(eq, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have rest parameters (only possible if we have an
@@ -4706,7 +4563,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the rest parameter array in r0.
@@ -4748,7 +4605,7 @@
     Label allocate, done_allocate;
     __ mov(r1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
     __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+    __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in r3.
@@ -4804,23 +4661,40 @@
   // -----------------------------------
   __ AssertFunction(r1);
 
+  // Make r9 point to the JavaScript frame.
+  __ mov(r9, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
+    __ cmp(ip, r1);
+    __ b(eq, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   __ ldr(r2,
          FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ add(r3, fp, Operand(r2, LSL, kPointerSizeLog2 - 1));
+  __ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
 
   // r1 : function
   // r2 : number of parameters (tagged)
   // r3 : parameters pointer
+  // r9 : JavaScript frame pointer
   // Registers used over whole function:
   //  r5 : arguments count (tagged)
   //  r6 : mapped parameter count (tagged)
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
   __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(eq, &adaptor_frame);
@@ -4863,7 +4737,7 @@
   __ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
+  __ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);
 
   // r0 = address of new object(s) (tagged)
   // r2 = argument count (smi-tagged)
@@ -5009,20 +4883,20 @@
   // -----------------------------------
   __ AssertFunction(r1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make r2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mov(r2, fp);
-    __ b(&loop_entry);
-    __ bind(&loop);
+  // Make r2 point to the JavaScript frame.
+  __ mov(r2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r1);
-    __ b(ne, &loop);
+    __ b(eq, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -5060,7 +4934,7 @@
   Label allocate, done_allocate;
   __ mov(r1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
   __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
-  __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+  __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in r3.
@@ -5423,7 +5297,11 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
+
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
 
   // context save
   __ push(context);
@@ -5457,7 +5335,7 @@
 
   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
-  const int kApiStackSpace = 4;
+  const int kApiStackSpace = 3;
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
@@ -5474,9 +5352,6 @@
   // FunctionCallbackInfo::length_ = argc
   __ mov(ip, Operand(argc()));
   __ str(ip, MemOperand(r0, 2 * kPointerSize));
-  // FunctionCallbackInfo::is_construct_call_ = 0
-  __ mov(ip, Operand::Zero());
-  __ str(ip, MemOperand(r0, 3 * kPointerSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5493,8 +5368,8 @@
   }
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
-  MemOperand* stack_space_operand = &is_construct_call_operand;
+  MemOperand length_operand = MemOperand(sp, 3 * kPointerSize);
+  MemOperand* stack_space_operand = &length_operand;
   stack_space = argc() + FCA::kArgsLength + 1;
   stack_space_operand = NULL;
 
@@ -5505,16 +5380,36 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- sp[0]                        : name
-  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- r2                           : api_function_address
-  // -----------------------------------
+  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame so that the GC is aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
 
-  Register api_function_address = ApiGetterDescriptor::function_address();
-  DCHECK(api_function_address.is(r2));
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = r4;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
 
+  Register api_function_address = r2;
+
+  __ push(receiver);
+  // Push data from AccessorInfo.
+  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ push(scratch);
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Push(scratch, scratch);
+  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch, holder);
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ push(scratch);
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
 
@@ -5534,6 +5429,10 @@
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+  __ ldr(api_function_address,
+         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
   // +3 is to skip prolog, return address and name handle.
   MemOperand return_value_operand(
       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5541,7 +5440,6 @@
                            kStackUnwindSpace, NULL, return_value_operand, NULL);
 }
 
-
 #undef __
 
 }  // namespace internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7e1a550..4014aba 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -450,6 +450,7 @@
   __ mov(lr, Operand(length, LSL, 2));
   __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
   __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  __ sub(array, array, Operand(kHeapObjectTag));
   // array: destination FixedDoubleArray, not tagged as heap object.
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // r4: source FixedArray.
@@ -594,11 +595,13 @@
   __ add(array_size, array_size, Operand(length, LSL, 1));
   __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
               NO_ALLOCATION_FLAGS);
-  // array: destination FixedArray, not tagged as heap object
+  // array: destination FixedArray, tagged as heap object
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ str(scratch, MemOperand(array, HeapObject::kMapOffset));
+  __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
+
+  __ sub(array, array, Operand(kHeapObjectTag));
 
   // Prepare for conversion loop.
   Register src_elements = elements;
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 287152a..20a898e 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1869,6 +1869,48 @@
         Unknown(instr);
       }
       break;
+    case 0x1C:
+      if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
+          (instr->Bit(4) == 0)) {
+        // VSEL* (floating-point)
+        bool dp_operation = (instr->SzValue() == 1);
+        switch (instr->Bits(21, 20)) {
+          case 0x0:
+            if (dp_operation) {
+              Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
+            } else {
+              Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
+            }
+            break;
+          case 0x1:
+            if (dp_operation) {
+              Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
+            } else {
+              Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
+            }
+            break;
+          case 0x2:
+            if (dp_operation) {
+              Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
+            } else {
+              Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
+            }
+            break;
+          case 0x3:
+            if (dp_operation) {
+              Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
+            } else {
+              Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
+            }
+            break;
+          default:
+            UNREACHABLE();  // Case analysis is exhaustive.
+            break;
+        }
+      } else {
+        Unknown(instr);
+      }
+      break;
     default:
       Unknown(instr);
       break;
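
The new 0x1C case decodes the ARMv8 VSEL family, which selects one of two FP source registers based on the integer condition flags. The condition mapping, which matches the simulator change later in this patch:

    // VSEL condition selection, per bits 21:20 of the instruction:
    //   vseleq: Z == 1          vselvs: V == 1
    //   vselge: N == V          vselgt: Z == 0 && N == V
    // e.g. the vselgt case as plain C++:
    double vselgt(bool n, bool z, bool v, double dn, double dm) {
      return (!z && n == v) ? dn : dm;
    }
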
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index b6cac76..4e8c95c 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -48,16 +48,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return r1; }
-const Register InstanceOfDescriptor::RightRegister() { return r0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return r1; }
 const Register StringCompareDescriptor::RightRegister() { return r0; }
 
-
-const Register ApiGetterDescriptor::function_address() { return r2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return r0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
 
 const Register MathPowTaggedDescriptor::exponent() { return r2; }
 
@@ -70,6 +65,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return r0; }
+const Register HasPropertyDescriptor::KeyRegister() { return r3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -248,13 +245,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // register state
+  // r0 -- number of arguments
+  // r1 -- function
+  // r2 -- allocation site with elements kind
+  Register registers[] = {r1, r2, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -318,6 +318,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -398,9 +403,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
-      kInterpreterDispatchTableRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -435,6 +439,16 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r0,  // the value to pass to the generator
+      r1,  // the JSGeneratorObject to resume
+      r2   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6af3d6c..d723251 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -355,37 +355,6 @@
 }
 
 
-void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
-                          Condition cond) {
-  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
-    DCHECK(!dst.is(pc) && !src.rm().is(pc));
-    DCHECK((satpos >= 0) && (satpos <= 31));
-
-    // These asserts are required to ensure compatibility with the ARMv7
-    // implementation.
-    DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
-    DCHECK(src.rs().is(no_reg));
-
-    Label done;
-    int satval = (1 << satpos) - 1;
-
-    if (cond != al) {
-      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
-    }
-    if (!(src.is_reg() && dst.is(src.rm()))) {
-      mov(dst, src);
-    }
-    tst(dst, Operand(~satval));
-    b(eq, &done);
-    mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
-    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
-    bind(&done);
-  } else {
-    usat(dst, satpos, src, cond);
-  }
-}
-
-
 void MacroAssembler::Load(Register dst,
                           const MemOperand& src,
                           Representation r) {
@@ -889,10 +858,8 @@
   // below doesn't support it yet.
   DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
 
-  // Generate two ldr instructions if ldrd is not available.
-  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
-      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
-    CpuFeatureScope scope(this, ARMv7);
+  // Generate two ldr instructions if ldrd is not applicable.
+  if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
     ldrd(dst1, dst2, src, cond);
   } else {
     if ((src.am() == Offset) || (src.am() == NegOffset)) {
@@ -930,10 +897,8 @@
   // below doesn't support it yet.
   DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
 
-  // Generate two str instructions if strd is not available.
-  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
-      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
-    CpuFeatureScope scope(this, ARMv7);
+  // Generate two str instructions if strd is not applicable.
+  if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
     strd(src1, src2, dst, cond);
   } else {
     MemOperand dst2(dst);
@@ -950,30 +915,12 @@
   }
 }
 
-
-void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
-  // If needed, restore wanted bits of FPSCR.
-  Label fpscr_done;
-  vmrs(scratch);
-  if (emit_debug_code()) {
-    Label rounding_mode_correct;
-    tst(scratch, Operand(kVFPRoundingModeMask));
-    b(eq, &rounding_mode_correct);
-    // Don't call Assert here, since Runtime_Abort could re-enter here.
-    stop("Default rounding mode not set");
-    bind(&rounding_mode_correct);
-  }
-  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
-  b(ne, &fpscr_done);
-  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
-  vmsr(scratch);
-  bind(&fpscr_done);
-}
-
-
 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                         const DwVfpRegister src,
                                         const Condition cond) {
+  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+  // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
+  // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
   vsub(dst, src, kDoubleRegZero, cond);
 }
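
The identity behind VFPCanonicalizeNaN can be checked on any IEEE-754 host: subtracting zero is value-preserving except that it quiets signalling NaNs, and unlike adding zero it keeps the sign of a negative zero. A host-side sketch under default rounding (it does not exercise a real signalling NaN):

    #include <cassert>
    #include <cmath>

    int main() {
      assert(std::signbit(-0.0 - 0.0));   // vsub keeps -0.0 ...
      assert(!std::signbit(-0.0 + 0.0));  // ... vadd would turn it into +0.0
      assert(std::isnan(std::nan("") - 0.0));  // NaNs pass through (quieted)
      return 0;
    }
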
 
@@ -2003,6 +1950,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -2090,26 +2038,29 @@
       shift += 8;
       Operand bits_operand(bits);
       DCHECK(bits_operand.instructions_required(this) == 1);
-      add(result_end, source, bits_operand, SetCC, cond);
+      add(result_end, source, bits_operand, LeaveCC, cond);
       source = result_end;
       cond = cc;
     }
   }
-  b(cs, gc_required);
+
   cmp(result_end, Operand(alloc_limit));
   b(hi, gc_required);
-  str(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    add(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    str(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  add(result, result, Operand(kHeapObjectTag));
 }
 
 
 void MacroAssembler::Allocate(Register object_size, Register result,
                               Register result_end, Register scratch,
                               Label* gc_required, AllocationFlags flags) {
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -2185,7 +2136,7 @@
   } else {
     add(result_end, result, Operand(object_size), SetCC);
   }
-  b(cs, gc_required);
+
   cmp(result_end, Operand(alloc_limit));
   b(hi, gc_required);
 
@@ -2194,14 +2145,122 @@
     tst(result_end, Operand(kObjectAlignmentMask));
     Check(eq, kUnalignedAllocationInNewSpace);
   }
-  str(result_end, MemOperand(top_address));
-
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    add(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    str(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  add(result, result, Operand(kHeapObjectTag));
 }
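
After this change the inline allocation fast path has the shape below: the separate branch-on-carry after the add is gone in favour of the limit comparison alone, the top pointer is left untouched for allocation folding dominators, and the result is always tagged. A pseudo-C++ sketch, where top/limit stand for the new-space bookkeeping words:

    // Sketch of the emitted fast path, not a callable V8 API.
    void* TryAllocate(uintptr_t* top, uintptr_t* limit, size_t size,
                      int flags) {
      uintptr_t result = *top;
      uintptr_t result_end = result + size;
      if (result_end > *limit) return nullptr;  // branch to gc_required
      if (!(flags & ALLOCATION_FOLDING_DOMINATOR)) {
        *top = result_end;  // dominators defer the top update
      }
      return reinterpret_cast<void*>(result + 1);  // kHeapObjectTag
    }
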
 
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, Register scratch,
+                                  AllocationFlags flags) {
+  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+  // is not specified. Other registers must not overlap.
+  DCHECK(!AreAliased(object_size, result, scratch, ip));
+  DCHECK(!AreAliased(result_end, result, scratch, ip));
+  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  Register top_address = scratch;
+  mov(top_address, Operand(allocation_top));
+  ldr(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }
+
+  // Calculate new top using result. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
+  } else {
+    add(result_end, result, Operand(object_size), SetCC);
+  }
+
+  // Update allocation top. result_end holds the new top.
+  if (emit_debug_code()) {
+    tst(result_end, Operand(kObjectAlignmentMask));
+    Check(eq, kUnalignedAllocationInNewSpace);
+  }
+  // FastAllocate is never a folding dominator, so the top pointer is always
+  // updated here.
+  str(result_end, MemOperand(top_address));
+
+  add(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register scratch1, Register scratch2,
+                                  AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address register.
+  Register top_address = scratch1;
+  Register result_end = scratch2;
+  mov(top_address, Operand(allocation_top));
+  ldr(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }
+
+  // Calculate new top using result. Object size may be in words so a shift is
+  // required to get the number of bytes. We must preserve the ip register at
+  // this point, so we cannot just use add().
+  DCHECK(object_size > 0);
+  Register source = result;
+  Condition cond = al;
+  int shift = 0;
+  while (object_size != 0) {
+    if (((object_size >> shift) & 0x03) == 0) {
+      shift += 2;
+    } else {
+      int bits = object_size & (0xff << shift);
+      object_size -= bits;
+      shift += 8;
+      Operand bits_operand(bits);
+      DCHECK(bits_operand.instructions_required(this) == 1);
+      add(result_end, source, bits_operand, LeaveCC, cond);
+      source = result_end;
+      cond = cc;
+    }
+  }
+
+  // FastAllocate is never a folding dominator, so the top pointer is always
+  // updated here.
+  str(result_end, MemOperand(top_address));
+
+  add(result, result, Operand(kHeapObjectTag));
+}
 
 void MacroAssembler::AllocateTwoByteString(Register result,
                                            Register length,
@@ -2218,12 +2277,8 @@
   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate two-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result,
@@ -2247,12 +2302,8 @@
   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate one-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -2266,7 +2317,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result,
                       length,
@@ -2280,12 +2331,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -2298,7 +2345,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result,
                       length,
@@ -2314,7 +2361,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -2414,12 +2461,6 @@
            DONT_DO_SMI_CHECK);
 
   vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-  // Force a canonical NaN.
-  if (emit_debug_code()) {
-    vmrs(ip);
-    tst(ip, Operand(kVFPDefaultNaNModeControlBit));
-    Assert(ne, kDefaultNaNModeNotSet);
-  }
   VFPCanonicalizeNaN(double_scratch);
   b(&store);
 
@@ -3129,6 +3170,17 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    tst(object, Operand(kSmiTagMask));
+    Check(ne, kOperandIsASmiAndNotAGeneratorObject);
+    push(object);
+    CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+    pop(object);
+    Check(eq, kOperandIsNotAGeneratorObject);
+  }
+}
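
The assertion expands to a Smi-tag test followed by an instance-type comparison; the predicate being checked is roughly the following (sketch using V8-style accessors, illustrative only):

    // What AssertGeneratorObject verifies, as a predicate (sketch).
    bool IsJSGeneratorObject(Object* object) {
      return !object->IsSmi() &&
             HeapObject::cast(object)->map()->instance_type() ==
                 JS_GENERATOR_OBJECT_TYPE;
    }
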
 
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
@@ -3225,12 +3277,11 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* gc_required,
-                                        TaggingMode tagging_mode,
                                         MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
-           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+           NO_ALLOCATION_FLAGS);
 
   Heap::RootListIndex map_index = mode == MUTABLE
       ? Heap::kMutableHeapNumberMapRootIndex
@@ -3238,11 +3289,7 @@
   AssertIsRoot(heap_number_map, map_index);
 
   // Store heap number map in the allocated object.
-  if (tagging_mode == TAG_RESULT) {
-    str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-  } else {
-    str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
-  }
+  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
 }
 
 
@@ -3267,7 +3314,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3662,7 +3710,7 @@
 
 
 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
-  Usat(output_reg, 8, Operand(input_reg));
+  usat(output_reg, 8, Operand(input_reg));
 }
 
 
@@ -3770,7 +3818,7 @@
     Label* no_memento_found) {
   Label map_check;
   Label top_check;
-  ExternalReference new_space_allocation_top =
+  ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -3780,7 +3828,9 @@
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
   add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+  mov(ip, Operand(new_space_allocation_top_adr));
+  ldr(ip, MemOperand(ip));
+  eor(scratch_reg, scratch_reg, Operand(ip));
   tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
   b(eq, &top_check);
   // The object is on a different page than allocation top. Bail out if the
@@ -3796,7 +3846,9 @@
   // we are below top.
   bind(&top_check);
   add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  cmp(scratch_reg, Operand(new_space_allocation_top));
+  mov(ip, Operand(new_space_allocation_top_adr));
+  ldr(ip, MemOperand(ip));
+  cmp(scratch_reg, ip);
   b(gt, no_memento_found);
   // Memento map check.
   bind(&map_check);
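
The fix in this hunk: the old code XOR'd and compared against the address of the allocation-top variable itself, whereas the new mov/ldr pairs load the current top value through ip before comparing. The surrounding page test is the usual XOR trick (sketch; the mask value is an assumption about V8's page size):

    // Two addresses lie on the same page iff their XOR has no bits set
    // above the page-offset bits.
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed
    bool OnSamePage(uintptr_t a, uintptr_t b) {
      return ((a ^ b) & ~kPageAlignmentMask) == 0;
    }
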
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index f326304..8fa197c 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -19,8 +19,8 @@
 const Register kReturnRegister2 = {Register::kCode_r2};
 const Register kJSFunctionRegister = {Register::kCode_r1};
 const Register kContextRegister = {Register::kCode_r7};
+const Register kAllocateSizeRegister = {Register::kCode_r1};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
 const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
@@ -157,8 +157,6 @@
            int width,
            Condition cond = al);
   void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
-  void Usat(Register dst, int satpos, const Operand& src,
-            Condition cond = al);
 
   void Call(Label* target);
   void Push(Register src) { push(src); }
@@ -489,15 +487,6 @@
             const MemOperand& dst,
             Condition cond = al);
 
-  // Ensure that FPSCR contains values needed by JavaScript.
-  // We need the NaNModeControlBit to be sure that operations like
-  // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
-  // In VFP3 it will be always the Canonical NaN.
-  // In VFP2 it will be either the Canonical NaN or the negative version
-  // of the Canonical NaN. It doesn't matter if we have two values. The aim
-  // is to be sure to never generate the hole NaN.
-  void VFPEnsureFPSCRState(Register scratch);
-
   // If the value is a NaN, canonicalize the value else, do nothing.
   void VFPCanonicalizeNaN(const DwVfpRegister dst,
                           const DwVfpRegister src,
@@ -792,6 +781,15 @@
   void Allocate(Register object_size, Register result, Register result_end,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
+  // FastAllocate is currently only used for folded allocations. It just
+  // increments the top pointer without checking against the limit. This can
+  // only be done if it has been proved earlier that the allocation will
+  // succeed.
+  void FastAllocate(int object_size, Register result, Register scratch1,
+                    Register scratch2, AllocationFlags flags);
+
+  void FastAllocate(Register object_size, Register result, Register result_end,
+                    Register scratch, AllocationFlags flags);
+
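
Allocation folding lets a dominating allocation perform one limit check for the combined size of a group; the dominated allocations then only bump the top pointer. A hedged sketch of the intended call pattern (the real call sites live in the optimizing code generators, not in this file):

    // Assumed usage, illustrative only.
    // Dominator: checks the limit for the folded size but leaves top alone.
    masm.Allocate(folded_size, result, scratch1, scratch2, &gc_required,
                  ALLOCATION_FOLDING_DOMINATOR);
    // Each folded allocation afterwards just bumps top, no limit check.
    masm.FastAllocate(object_size, result, scratch1, scratch2,
                      NO_ALLOCATION_FLAGS);
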
   void AllocateTwoByteString(Register result,
                              Register length,
                              Register scratch1,
@@ -826,7 +824,6 @@
                           Register scratch2,
                           Register heap_number_map,
                           Label* gc_required,
-                          TaggingMode tagging_mode = TAG_RESULT,
                           MutableMode mode = IMMUTABLE);
   void AllocateHeapNumberWithValue(Register result,
                                    DwVfpRegister value,
@@ -1326,6 +1323,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6c22a0a..1a870c5 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -387,7 +387,7 @@
         end = cur + words;
 
         while (cur < end) {
-          PrintF("  0x%08x:  0x%08x %10d",
+          PrintF("  0x%08" V8PRIxPTR ":  0x%08x %10d",
                  reinterpret_cast<intptr_t>(cur), *cur, *cur);
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int value = *cur;
@@ -449,8 +449,8 @@
         while (cur < end) {
           prev = cur;
           cur += dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08x  %s\n",
-                 reinterpret_cast<intptr_t>(prev), buffer.start());
+          PrintF("  0x%08" V8PRIxPTR "  %s\n", reinterpret_cast<intptr_t>(prev),
+                 buffer.start());
         }
       } else if (strcmp(cmd, "gdb") == 0) {
         PrintF("relinquishing control to gdb\n");
@@ -1271,7 +1271,7 @@
 
 // Unsupported instructions use Format to print an error and stop execution.
 void Simulator::Format(Instruction* instr, const char* format) {
-  PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+  PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
          reinterpret_cast<intptr_t>(instr), format);
   UNIMPLEMENTED();
 }
@@ -4028,6 +4028,45 @@
         UNIMPLEMENTED();
       }
       break;
+    case 0x1C:
+      if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
+          (instr->Bit(4) == 0)) {
+        // VSEL* (floating-point)
+        bool condition_holds;
+        switch (instr->Bits(21, 20)) {
+          case 0x0:  // VSELEQ
+            condition_holds = (z_flag_ == 1);
+            break;
+          case 0x1:  // VSELVS
+            condition_holds = (v_flag_ == 1);
+            break;
+          case 0x2:  // VSELGE
+            condition_holds = (n_flag_ == v_flag_);
+            break;
+          case 0x3:  // VSELGT
+            condition_holds = ((z_flag_ == 0) && (n_flag_ == v_flag_));
+            break;
+          default:
+            UNREACHABLE();  // Case analysis is exhaustive.
+            break;
+        }
+        if (instr->SzValue() == 0x1) {
+          int n = instr->VFPNRegValue(kDoublePrecision);
+          int m = instr->VFPMRegValue(kDoublePrecision);
+          int d = instr->VFPDRegValue(kDoublePrecision);
+          double result = get_double_from_d_register(condition_holds ? n : m);
+          set_d_register_from_double(d, result);
+        } else {
+          int n = instr->VFPNRegValue(kSinglePrecision);
+          int m = instr->VFPMRegValue(kSinglePrecision);
+          int d = instr->VFPDRegValue(kSinglePrecision);
+          float result = get_float_from_s_register(condition_holds ? n : m);
+          set_s_register_from_float(d, result);
+        }
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
     default:
       UNIMPLEMENTED();
       break;
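
The condition tests in the new VSEL case follow the ARM condition-code definitions: EQ is Z set, VS is V set, GE is N == V, and GT is Z clear with N == V. A self-contained sketch of the same selection logic, with a hypothetical Flags struct standing in for the simulator's n_flag_/z_flag_/v_flag_ fields:

#include <cassert>

struct Flags { bool n, z, v; };  // stand-in for the simulator flag state

double SimulateVsel(unsigned cond, Flags f, double dn, double dm) {
  bool holds = false;
  switch (cond) {                                   // instr->Bits(21, 20)
    case 0x0: holds = f.z; break;                   // VSELEQ
    case 0x1: holds = f.v; break;                   // VSELVS
    case 0x2: holds = (f.n == f.v); break;          // VSELGE
    case 0x3: holds = !f.z && (f.n == f.v); break;  // VSELGT
    default: assert(false);
  }
  return holds ? dn : dm;  // select Dn when the condition holds, else Dm
}

int main() {
  Flags eq{false, true, false};  // Z set
  assert(SimulateVsel(0x0, eq, 1.0, 2.0) == 1.0);
  return 0;
}
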
@@ -4048,7 +4087,8 @@
     v8::internal::EmbeddedVector<char, 256> buffer;
     dasm.InstructionDecode(buffer,
                            reinterpret_cast<byte*>(instr));
-    PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
+    PrintF("  0x%08" V8PRIxPTR "  %s\n", reinterpret_cast<intptr_t>(instr),
+           buffer.start());
   }
   if (instr->ConditionField() == kSpecialCondition) {
     DecodeSpecialCondition(instr);
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index 6191216..8e46771 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -41,19 +41,6 @@
   }
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
-                                   icache_flush_mode);
-}
-
 inline int CPURegister::code() const {
   DCHECK(IsValid());
   return reg_code;
@@ -705,11 +692,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Assembler::target_address_at(pc_, host_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
@@ -868,7 +850,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 2471d5e..91563a4 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -51,26 +51,13 @@
   // Only use statically determined features for cross compile (snapshot).
   if (cross_compile) return;
 
-  // Probe for runtime features
-  base::CPU cpu;
-  if (cpu.implementer() == base::CPU::NVIDIA &&
-      cpu.variant() == base::CPU::NVIDIA_DENVER &&
-      cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
-    // TODO(jkummerow): This is turned off as an experiment to see if it
-    // affects crash rates. Keep an eye on crash reports and either remove
-    // coherent cache support permanently, or re-enable it!
-    // supported_ |= 1u << COHERENT_CACHE;
-  }
+  // We used to probe for coherent cache support, but on older CPUs it
+  // causes crashes (crbug.com/524337), and newer CPUs don't even have
+  // the feature any more.
 }
 
-
 void CpuFeatures::PrintTarget() { }
-
-
-void CpuFeatures::PrintFeatures() {
-  printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
-}
-
+void CpuFeatures::PrintFeatures() {}
 
 // -----------------------------------------------------------------------------
 // CPURegList utilities.
@@ -192,6 +179,41 @@
   return instr->IsLdrLiteralX();
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_) && old_base != new_base) {
+    Address updated_memory_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_memory_reference &&
+           updated_memory_reference < new_base + new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) =
+        updated_size_reference;
+  } else {
+    UNREACHABLE();
+  }
+}
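
The relocated update routine preserves an address's offset from the base of the wasm memory when the backing store moves, and shifts a size reference by the growth delta; the DCHECKs bound both results within the new region. A plain-pointer sketch of the same arithmetic (hypothetical helper names; Address modeled as a byte pointer):

#include <cassert>
#include <cstdint>

using Address = uint8_t*;

Address RebaseReference(Address ref, Address old_base, Address new_base,
                        uint32_t old_size, uint32_t new_size) {
  assert(old_base <= ref && ref < old_base + old_size);
  Address updated = new_base + (ref - old_base);  // keep the offset
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}

uint32_t RebaseSize(uint32_t size_ref, uint32_t old_size, uint32_t new_size) {
  assert(size_ref <= old_size);
  uint32_t updated = new_size + (size_ref - old_size);  // shift by the delta
  assert(updated <= new_size);
  return updated;
}

int main() {
  uint8_t old_mem[16], new_mem[32];
  assert(RebaseReference(old_mem + 4, old_mem, new_mem, 16, 32) == new_mem + 4);
  assert(RebaseSize(16, 16, 32) == 32);
  return 0;
}
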
 
 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                               Register reg3, Register reg4) {
@@ -294,13 +316,11 @@
 // Constant Pool.
 void ConstPool::RecordEntry(intptr_t data,
                             RelocInfo::Mode mode) {
-  DCHECK(mode != RelocInfo::COMMENT &&
-         mode != RelocInfo::POSITION &&
+  DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::POSITION &&
          mode != RelocInfo::STATEMENT_POSITION &&
-         mode != RelocInfo::CONST_POOL &&
-         mode != RelocInfo::VENEER_POOL &&
+         mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
          mode != RelocInfo::CODE_AGE_SEQUENCE &&
-         mode != RelocInfo::DEOPT_REASON);
+         mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
   uint64_t raw_data = static_cast<uint64_t>(data);
   int offset = assm_->pc_offset();
   if (IsEmpty()) {
@@ -2878,11 +2898,12 @@
        (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
       (rmode == RelocInfo::INTERNAL_REFERENCE) ||
       (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
-      (rmode == RelocInfo::DEOPT_REASON) ||
+      (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
       (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
     // Adjust code for new modes.
     DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
-           RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
+           RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
+           RelocInfo::IsPosition(rmode) ||
            RelocInfo::IsInternalReference(rmode) ||
            RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
            RelocInfo::IsGeneratorContinuation(rmode));
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 5460254..fac7a70 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -40,6 +40,12 @@
   R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
   R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
 
+#define FLOAT_REGISTERS(V)                               \
+  V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  \
+  V(s8)  V(s9)  V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
+  V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
+  V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
+
 #define DOUBLE_REGISTERS(R)                               \
   R(d0)  R(d1)  R(d2)  R(d3)  R(d4)  R(d5)  R(d6)  R(d7)  \
   R(d8)  R(d9)  R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
@@ -366,7 +372,7 @@
                         const CPURegister& reg7 = NoCPUReg,
                         const CPURegister& reg8 = NoCPUReg);
 
-
+typedef FPRegister FloatRegister;
 typedef FPRegister DoubleRegister;
 
 // TODO(arm64) Define SIMD registers.
@@ -929,7 +935,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   int buffer_space() const;
 
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index 44bfc17..be372e6 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -605,16 +605,9 @@
     // x0: number of arguments
     // x1: constructor function
     // x3: new target
-    if (is_api_function) {
-      __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(argc);
-      __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+    ParameterCount actual(argc);
+    __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -711,6 +704,132 @@
   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the value to pass to the generator
+  //  -- x1 : the JSGeneratorObject to resume
+  //  -- x2 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(x1);
+
+  // Store input value into generator object.
+  __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOffset));
+  __ RecordWriteField(x1, JSGeneratorObject::kInputOffset, x0, x3,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ Ldr(cp, FieldMemOperand(x1, JSGeneratorObject::kContextOffset));
+  __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ Mov(x10, Operand(step_in_enabled));
+  __ Ldrb(x10, MemOperand(x10));
+  __ CompareAndBranch(x10, Operand(0), eq, &skip_flooding);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(x1, x2, x4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(x2, x1);
+    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Push receiver.
+  __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+  __ Push(x5);
+
+  // ----------- S t a t e -------------
+  //  -- x1      : the JSGeneratorObject to resume
+  //  -- x2      : the resume mode (tagged)
+  //  -- x4      : generator function
+  //  -- cp      : generator context
+  //  -- lr      : return address
+  //  -- jssp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(w10,
+         FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
+  __ PushMultipleTimes(x11, w10);
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
+  __ B(ne, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldr(x0,
+         FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ SmiUntag(x0);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(x3, x1);
+    __ Move(x1, x4);
+    __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+    __ Jump(x5);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(lr, fp);
+    __ Move(fp, jssp);
+    __ Push(cp, x4);
+
+    // Restore the operand stack.
+    __ Ldr(x0, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
+    __ Ldr(w3, UntagSmiFieldMemOperand(x0, FixedArray::kLengthOffset));
+    __ Add(x0, x0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ Add(x3, x0, Operand(x3, LSL, kPointerSizeLog2));
+    {
+      Label done_loop, loop;
+      __ Bind(&loop);
+      __ Cmp(x0, x3);
+      __ B(eq, &done_loop);
+      __ Ldr(x10, MemOperand(x0, kPointerSize, PostIndex));
+      __ Push(x10);
+      __ B(&loop);
+      __ Bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+    __ Str(x10, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldr(x10, FieldMemOperand(x10, SharedFunctionInfo::kCodeOffset));
+    __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
+    __ Ldrsw(x11,
+        UntagSmiFieldMemOperand(x1, JSGeneratorObject::kContinuationOffset));
+    __ Add(x10, x10, x11);
+    __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+    __ Str(x12, FieldMemOperand(x1, JSGeneratorObject::kContinuationOffset));
+    __ Move(x0, x1);  // Continuation expects generator object in x0.
+    __ Br(x10);
+  }
+}
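
The trampoline dispatches on whether the suspended function's SharedFunctionInfo carries a BytecodeArray: Ignition generators are resumed by re-entering the function's code with new.target holding the generator object, while full-codegen generators get their frame and operand stack rebuilt before jumping to the stored continuation offset. A high-level sketch of that decision (hypothetical types; the real code keeps everything in x1/x3/x4):

#include <cstdio>

struct Generator { int continuation; };        // stand-in for JSGeneratorObject
struct Function { bool has_bytecode_array; };  // stand-in for JSFunction

void ResumeGenerator(const Function& fn, const Generator& gen) {
  if (fn.has_bytecode_array) {
    // New-style resume: jump to the code entry, with new.target carrying
    // the generator object (legal since generators never construct).
    std::printf("enter interpreter, generator as new.target\n");
  } else {
    // Old-style resume: rebuild the frame, restore the operand stack, and
    // continue at code start + continuation offset.
    std::printf("resume full-codegen at offset %d\n", gen.continuation);
  }
}

int main() {
  ResumeGenerator(Function{true}, Generator{0});
  return 0;
}
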
 
 enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
 
@@ -844,7 +963,6 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
-
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -861,6 +979,8 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
@@ -868,8 +988,8 @@
   __ Push(lr, fp, cp, x1);
   __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into kInterpreterBytecodeRegister.
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   Register debug_info = kInterpreterBytecodeArrayRegister;
   Label load_debug_bytecode_array, bytecode_array_loaded;
@@ -881,8 +1001,12 @@
          FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
   __ Bind(&bytecode_array_loaded);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ CompareRoot(kInterpreterBytecodeArrayRegister,
+                 Heap::kUndefinedValueRootIndex);
+  __ B(eq, &bytecode_array_not_present);
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
                     kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
@@ -890,8 +1014,12 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
-  // Push new.target, bytecode array and zero for bytecode array offset.
-  __ Mov(x0, Operand(0));
+  // Load the initial bytecode offset.
+  __ Mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(x0, kInterpreterBytecodeOffsetRegister);
   __ Push(x3, kInterpreterBytecodeArrayRegister, x0);
 
   // Allocate the local and temporary register file on the stack.
@@ -921,18 +1049,8 @@
     __ Bind(&loop_header);
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load accumulator, register file, bytecode offset, dispatch table into
-  // registers.
+  // Load accumulator and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ Add(kInterpreterRegisterFileRegister, fp,
-         Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ Mov(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ Mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
@@ -942,60 +1060,130 @@
                          kInterpreterBytecodeOffsetRegister));
   __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
   __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(ip0);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+  // The return value is in x0.
+
+  // Get the arguments + receiver count.
+  __ ldr(x1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Ldr(w1, FieldMemOperand(x1, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments and return.
+  __ Drop(x1, 1);
+  __ Ret();
 
   // Load debug copy of the bytecode array.
   __ Bind(&load_debug_bytecode_array);
   __ Ldr(kInterpreterBytecodeArrayRegister,
          FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ B(&bytecode_array_loaded);
-}
 
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in x0.
-
-  // Leave the frame (also dropping the register file).
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ Bind(&bytecode_array_not_present);
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  // Drop receiver + arguments and return.
-  __ Ldr(w1, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                             BytecodeArray::kParameterSizeOffset));
-  __ Drop(x1, 1);
-  __ Ret();
+  __ Ldr(x7, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x7, FieldMemOperand(x7, SharedFunctionInfo::kCodeOffset));
+  __ Add(x7, x7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(x1, x7, x5);
+  __ Jump(x7);
 }
 
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x2 : the address of the first argument to be pushed. Subsequent
+  //          arguments should be consecutive above this, in the same order as
+  //          they are to be pushed onto the stack.
+  //  -- x1 : the target to call (can be any Object).
+  // -----------------------------------
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register and dispatch table register.
-  __ Add(kInterpreterRegisterFileRegister, fp,
-         Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Find the address of the last argument.
+  __ add(x3, x0, Operand(1));  // Add one for receiver.
+  __ lsl(x3, x3, kPointerSizeLog2);
+  __ sub(x4, x2, x3);
+
+  // Push the arguments.
+  Label loop_header, loop_check;
+  __ Mov(x5, jssp);
+  __ Claim(x3, 1);
+  __ B(&loop_check);
+  __ Bind(&loop_header);
+  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+  __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
+  __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
+  __ Bind(&loop_check);
+  __ Cmp(x2, x4);
+  __ B(gt, &loop_header);
+
+  // Call the target.
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- x0 : argument count (not including receiver)
+  // -- x3 : new target
+  // -- x1 : constructor to call
+  // -- x2 : address of the first argument
+  // -----------------------------------
+
+  // Find the address of the last argument.
+  __ add(x5, x0, Operand(1));  // Add one for receiver (to be constructed).
+  __ lsl(x5, x5, kPointerSizeLog2);
+
+  // Set stack pointer and where to stop.
+  __ Mov(x6, jssp);
+  __ Claim(x5, 1);
+  __ sub(x4, x6, x5);
+
+  // Push a slot for the receiver.
+  __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+
+  Label loop_header, loop_check;
+  // Push the arguments.
+  __ B(&loop_check);
+  __ Bind(&loop_header);
+  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+  __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
+  __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
+  __ Bind(&loop_check);
+  __ Cmp(x6, x4);
+  __ B(gt, &loop_header);
+
+  // Call the constructor with x0, x1, and x3 unmodified.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
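
Both push helpers compute a stop address (argc + 1 slots, counting the receiver), claim stack space, and then copy one word per iteration using a post-indexed load and a pre-indexed store, preserving argument order. A plain-pointer sketch of the call variant's copy loop (hypothetical signature, not V8 code):

#include <cassert>
#include <cstdint>

// Copies argc + 1 slots (arguments plus receiver) onto a downward-growing
// stack, mirroring the Ldr/Str loop above.
void PushArgs(intptr_t*& sp, const intptr_t* first_arg, int argc) {
  const intptr_t* cur = first_arg;                // x2
  const intptr_t* stop = first_arg - (argc + 1);  // x4
  while (cur > stop) {  // B(gt, &loop_header)
    *--sp = *cur--;     // Str pre-index / Ldr post-index
  }
}

int main() {
  intptr_t mem[3] = {11, 22, 33};
  intptr_t stack[4];
  intptr_t* sp = stack + 4;
  PushArgs(sp, mem + 2, 1);  // one argument plus receiver
  assert(sp == stack + 2 && sp[0] == 22 && sp[1] == 33);
  return 0;
}
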
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ LoadObject(x1, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
+                         Code::kHeaderSize - kHeapObjectTag));
+
+  // Initialize the dispatch table register.
   __ Mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
 
-  // Get the context from the frame.
-  __ Ldr(kContextRegister,
-         MemOperand(kInterpreterRegisterFileRegister,
-                    InterpreterFrameConstants::kContextFromRegisterPointer));
-
   // Get the bytecode array pointer from the frame.
-  __ Ldr(
-      kInterpreterBytecodeArrayRegister,
-      MemOperand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+  __ Ldr(kInterpreterBytecodeArrayRegister,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1008,9 +1196,7 @@
 
   // Get the target bytecode offset from the frame.
   __ Ldr(kInterpreterBytecodeOffsetRegister,
-         MemOperand(
-             kInterpreterRegisterFileRegister,
-             InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
   // Dispatch to the target bytecode.
@@ -1018,63 +1204,147 @@
                          kInterpreterBytecodeOffsetRegister));
   __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
   __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
-  __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(ip0);
 }
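
Note that the Code::kHeaderSize - kHeapObjectTag adjustment is gone both here and in the entry trampoline above, which suggests the dispatch table now holds raw code entry addresses rather than tagged Code objects. The resulting dispatch is two dependent loads; a plain-pointer sketch (hypothetical names):

#include <cassert>
#include <cstdint>

using Handler = const void*;

Handler LookupHandler(const Handler* dispatch_table, const uint8_t* bytecodes,
                      intptr_t offset) {
  uint8_t bytecode = bytecodes[offset];  // Ldrb from array + offset
  return dispatch_table[bytecode];       // Ldr from table + (b << log2(ptr))
}

int main() {
  static int dummy_handler;
  Handler table[256] = {nullptr};
  table[7] = &dummy_handler;
  uint8_t bytecodes[] = {7};
  assert(LookupHandler(table, bytecodes, 0) == &dummy_handler);
  return 0;
}
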
 
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ Push(x1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and and pop the
-  // accumulator value into the accumulator register.
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : argument count (preserved for callee)
+  //  -- x3 : new target (preserved for callee)
+  //  -- x1 : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = x1;
+  Register map = x13;
+  Register index = x2;
+  __ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(map,
+         FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Cmp(index, Operand(2));
+  __ B(lt, &gotta_call_runtime);
+
+  // Find literals.
+  // x4  : native context
+  // x2  : length / index
+  // x13 : optimized code map
+  // stack[0] : new target
+  // stack[4] : closure
+  Register native_context = x4;
+  __ Ldr(native_context, NativeContextMemOperand());
+
+  __ Bind(&loop_top);
+  Register temp = x5;
+  Register array_pointer = x6;
+
+  // Does the native context match?
+  __ Add(array_pointer, map, Operand(index, LSL, kPointerSizeLog2));
+  __ Ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousContext));
+  __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ Cmp(temp, native_context);
+  __ B(ne, &loop_bottom);
+  // OSR id set to none?
+  __ Ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
+  __ B(ne, &loop_bottom);
+  // Literals available?
+  __ Ldr(temp, FieldMemOperand(array_pointer,
+                               SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = x7;
+  __ Ldr(entry,
+         FieldMemOperand(array_pointer,
+                         SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ Bind(&install_optimized_code_and_tailcall);
+  __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, x5);
+
+  // Link the closure into the optimized function list.
+  // x7 : code entry
+  // x4 : native context
+  // x1 : closure
+  __ Ldr(x8,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x13,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ Str(closure,
+         ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ Mov(x5, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, x5, x13,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Jump(entry);
+
+  __ Bind(&loop_bottom);
+  __ Sub(index, index, Operand(SharedFunctionInfo::kEntryLength));
+  __ Cmp(index, Operand(1));
+  __ B(gt, &loop_top);
+
+  // We found neither literals nor code.
+  __ B(&gotta_call_runtime);
+
+  __ Bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context free optimized code map entry.
+  __ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                         SharedFunctionInfo::kSharedCodeIndex));
+  __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ B(&install_optimized_code_and_tailcall);
+
+  __ Bind(&try_shared);
+  // Is the full code valid?
+  __ Ldr(entry,
+         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ and_(x5, x5, Operand(Code::KindField::kMask));
+  __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
+  __ Cmp(x5, Operand(Code::BUILTIN));
+  __ B(eq, &gotta_call_runtime);
+  // Yes, install the full code.
+  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, x5);
+  __ Jump(entry);
+
+  __ Bind(&gotta_call_runtime);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
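
The fast path added to Generate_CompileLazy walks the SharedFunctionInfo's optimized code map from the back, looking for an entry whose native context matches, whose OSR id is BailoutId::None, and whose literals weak cell is still alive; only then does it install literals (and, if alive, code) into the closure. A simplified C++ model of the search (hypothetical Entry layout; the real map is a FixedArray of weak cells):

#include <cassert>

struct Entry {
  const void* native_context;  // from a WeakCell in the real map
  int osr_ast_id;
  const void* literals;        // nullptr here models a cleared weak cell
  const void* code;
};

// Returns the cached code to install, or nullptr to fall back to the
// runtime (the builtin additionally tries a shared slot and the SFI code).
const void* FindCachedCode(const Entry* entries, int count,
                           const void* native_context, int none_id) {
  for (int i = count - 1; i >= 0; --i) {  // loop_top / loop_bottom
    const Entry& e = entries[i];
    if (e.native_context != native_context) continue;
    if (e.osr_ast_id != none_id) continue;
    if (e.literals == nullptr) return nullptr;  // gotta_call_runtime
    return e.code;  // a dead code cell maps to maybe_call_runtime instead
  }
  return nullptr;
}

int main() {
  int ctx, code;
  Entry entries[] = {{&ctx, -1, nullptr, nullptr}, {&ctx, -1, &ctx, &code}};
  assert(FindCachedCode(entries, 2, &ctx, -1) == &code);
  return 0;
}
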
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
@@ -1225,15 +1495,19 @@
 
   // Switch on the state.
   Label with_tos_register, unknown_state;
-  __ CompareAndBranch(
-      state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
+  __ CompareAndBranch(state,
+                      static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS),
+                      ne, &with_tos_register);
   __ Drop(1);  // Remove state.
   __ Ret();
 
   __ Bind(&with_tos_register);
   // Reload TOS register.
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), x0.code());
   __ Peek(x0, kPointerSize);
-  __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
+  __ CompareAndBranch(state,
+                      static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER),
+                      ne, &unknown_state);
   __ Drop(2);  // Remove state and TOS.
   __ Ret();
 
@@ -1439,30 +1713,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x0      : argc
-  //  -- jssp[0] : first argument (left-hand side)
-  //  -- jssp[8] : receiver (right-hand side)
-  // -----------------------------------
-  ASM_LOCATION("Builtins::Generate_FunctionHasInstance");
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Ldr(InstanceOfDescriptor::LeftRegister(),
-           MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
-    __ Ldr(InstanceOfDescriptor::RightRegister(),
-           MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ Drop(2);
-  __ Ret();
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- x0       : argc
@@ -2434,80 +2684,33 @@
           RelocInfo::CODE_TARGET);
 }
 
-
 // static
-void Builtins::Generate_InterpreterPushArgsAndCallImpl(
-    MacroAssembler* masm, TailCallMode tail_call_mode) {
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
   // ----------- S t a t e -------------
-  //  -- x0 : the number of arguments (not including the receiver)
-  //  -- x2 : the address of the first argument to be pushed. Subsequent
-  //          arguments should be consecutive above this, in the same order as
-  //          they are to be pushed onto the stack.
-  //  -- x1 : the target to call (can be any Object).
+  //  -- x1 : requested object size (untagged)
+  //  -- lr : return address
   // -----------------------------------
-
-  // Find the address of the last argument.
-  __ add(x3, x0, Operand(1));  // Add one for receiver.
-  __ lsl(x3, x3, kPointerSizeLog2);
-  __ sub(x4, x2, x3);
-
-  // Push the arguments.
-  Label loop_header, loop_check;
-  __ Mov(x5, jssp);
-  __ Claim(x3, 1);
-  __ B(&loop_check);
-  __ Bind(&loop_header);
-  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
-  __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
-  __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
-  __ Bind(&loop_check);
-  __ Cmp(x2, x4);
-  __ B(gt, &loop_header);
-
-  // Call the target.
-  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
-                                            tail_call_mode),
-          RelocInfo::CODE_TARGET);
+  __ SmiTag(x1);
+  __ Push(x1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
 }
 
-
 // static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
   // ----------- S t a t e -------------
-  // -- x0 : argument count (not including receiver)
-  // -- x3 : new target
-  // -- x1 : constructor to call
-  // -- x2 : address of the first argument
+  //  -- x1 : requested object size (untagged)
+  //  -- lr : return address
   // -----------------------------------
-
-  // Find the address of the last argument.
-  __ add(x5, x0, Operand(1));  // Add one for receiver (to be constructed).
-  __ lsl(x5, x5, kPointerSizeLog2);
-
-  // Set stack pointer and where to stop.
-  __ Mov(x6, jssp);
-  __ Claim(x5, 1);
-  __ sub(x4, x6, x5);
-
-  // Push a slot for the receiver.
-  __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
-
-  Label loop_header, loop_check;
-  // Push the arguments.
-  __ B(&loop_check);
-  __ Bind(&loop_header);
-  // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
-  __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
-  __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
-  __ Bind(&loop_check);
-  __ Cmp(x6, x4);
-  __ B(gt, &loop_header);
-
-  // Call the constructor with x0, x1, and x3 unmodified.
-  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  __ SmiTag(x1);
+  __ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(x1, x2);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
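
Both allocation builtins Smi-tag the requested size before the runtime call. On arm64, V8 keeps a Smi's 32-bit payload in the upper half of the 64-bit word, so tagging and untagging are 32-bit shifts; a minimal sketch of that representation (assuming the 64-bit Smi layout):

#include <cassert>
#include <cstdint>

// 64-bit Smi layout: payload in the high 32 bits, tag bits (zero) below.
int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }
int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

int main() {
  assert(SmiUntag(SmiTag(4096)) == 4096);  // a requested object size
  assert(SmiTag(0) == 0);                  // Smi::FromInt(0), as moved into cp
  return 0;
}
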
 
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
   // ----------- S t a t e -------------
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index ee40535..a96b3df 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -43,12 +43,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -77,11 +71,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1323,11 +1312,6 @@
   __ Mov(jssp, csp);
   __ SetStackPointer(jssp);
 
-  // Configure the FPCR. We don't restore it, so this is technically not allowed
-  // according to AAPCS64. However, we only set default-NaN mode and this will
-  // be harmless for most C code. Also, it works for ARM.
-  __ ConfigureFPCR();
-
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
   // Set up the reserved register for 0.0.
@@ -1520,123 +1504,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = x1;              // Object (lhs).
-  Register const function = x0;            // Function (rhs).
-  Register const object_map = x2;          // Map of {object}.
-  Register const function_map = x3;        // Map of {function}.
-  Register const function_prototype = x4;  // Prototype of {function}.
-  Register const scratch = x5;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex,
-                   &fast_case);
-  __ JumpIfNotRoot(object_map, Heap::kInstanceofCacheMapRootIndex, &fast_case);
-  __ LoadRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ Bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
-                         &slow_case);
-  __ LoadRoot(x0, Heap::kFalseValueRootIndex);
-  __ Ret();
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ Bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
-                         &slow_case);
-
-  // Go to the runtime if the function is not a constructor.
-  __ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
-  __ Tbz(scratch, Map::kIsConstructor, &slow_case);
-
-  // Ensure that {function} has an instance prototype.
-  __ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ Ldr(function_prototype,
-         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  __ JumpIfNotObjectType(function_prototype, scratch, scratch, MAP_TYPE,
-                         &function_prototype_valid);
-  __ Ldr(function_prototype,
-         FieldMemOperand(function_prototype, Map::kPrototypeOffset));
-  __ Bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Register const object_instance_type = function_map;
-  Register const map_bit_field = function_map;
-  Register const null = scratch;
-  Register const result = x0;
-
-  Label done, loop, fast_runtime_fallback;
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ LoadRoot(null, Heap::kNullValueRootIndex);
-  __ Bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ Ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
-  __ TestAndBranchIfAnySet(map_bit_field, 1 << Map::kIsAccessCheckNeeded,
-                           &fast_runtime_fallback);
-  // Check if the current object is a Proxy.
-  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
-  __ B(eq, &fast_runtime_fallback);
-
-  __ Ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ Cmp(object, function_prototype);
-  __ B(eq, &done);
-  __ Cmp(object, null);
-  __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ B(ne, &loop);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ Bind(&done);
-  __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // Found Proxy or access check needed: Call the runtime
-  __ Bind(&fast_runtime_fallback);
-  __ Push(object, function_prototype);
-  // Invalidate the instanceof cache.
-  __ Move(scratch, Smi::FromInt(0));
-  __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ Push(object, function);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec);
@@ -3815,8 +3682,8 @@
 
   __ Bind(&not_array);
   __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, feedback,
                                                receiver_map, scratch1, x7);
@@ -3951,8 +3818,8 @@
 
   __ Bind(&not_array);
   __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
                                                receiver, key, feedback,
                                                receiver_map, scratch1, x8);
@@ -4829,17 +4696,18 @@
   __ Bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ Mov(x1, x0);
   STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
-  __ Str(x2, MemOperand(x1, kPointerSize, PostIndex));
+  __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
   __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
   STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
   STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
-  __ Stp(x3, x3, MemOperand(x1, 2 * kPointerSize, PostIndex));
+  __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
+  __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
 
   // ----------- S t a t e -------------
-  //  -- x0 : result (untagged)
+  //  -- x0 : result (tagged)
   //  -- x1 : result fields (untagged)
   //  -- x5 : result end (untagged)
   //  -- x2 : initial map
@@ -4857,10 +4725,6 @@
   {
     // Initialize all in-object fields with undefined.
     __ InitializeFieldsWithFiller(x1, x5, x6);
-
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ Add(x0, x0, kHeapObjectTag);
     __ Ret();
   }
   __ Bind(&slack_tracking);
@@ -4879,10 +4743,6 @@
     __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
     __ InitializeFieldsWithFiller(x1, x5, x6);
 
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ Add(x0, x0, kHeapObjectTag);
-
     // Check if we can finalize the instance size.
     Label finalize;
     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4912,10 +4772,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(x2);
   }
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ Sub(x0, x0, kHeapObjectTag);
   __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
   __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ Sub(x5, x5, kHeapObjectTag);  // Subtract the tag from end.
   __ B(&done_allocate);
 
   // Fall back to %NewObject.
@@ -4934,20 +4794,20 @@
   // -----------------------------------
   __ AssertFunction(x1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make x2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ Mov(x2, fp);
-    __ B(&loop_entry);
-    __ Bind(&loop);
+  // Make x2 point to the JavaScript frame.
+  __ Mov(x2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-    __ Bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
     __ Cmp(x3, x1);
-    __ B(ne, &loop);
+    __ B(eq, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ Bind(&ok);
   }
 
   // Check if we have rest parameters (only possible if we have an
@@ -4979,7 +4839,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
     __ Bind(&done_allocate);
 
     // Setup the rest parameter array in x0.
@@ -5020,7 +4880,7 @@
     Label allocate, done_allocate;
     __ Mov(x1, JSArray::kSize + FixedArray::kHeaderSize);
     __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
-    __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+    __ Allocate(x1, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
     __ Bind(&done_allocate);
 
     // Compute arguments.length in x6.
@@ -5081,17 +4941,34 @@
   // -----------------------------------
   __ AssertFunction(x1);
 
+  // Make x6 point to the JavaScript frame.
+  __ Mov(x6, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
+    __ Cmp(x3, x1);
+    __ B(eq, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ Bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   __ Ldrsw(
       x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Add(x3, fp, Operand(x2, LSL, kPointerSizeLog2));
+  __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
   __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
   __ SmiTag(x2);
 
   // x1 : function
   // x2 : number of parameters (tagged)
   // x3 : parameters pointer
+  // x6 : JavaScript frame pointer
   //
   // Returns pointer to result object in x0.
 
@@ -5109,7 +4986,7 @@
   Register caller_ctx = x12;
   Label runtime;
   Label adaptor_frame, try_allocate;
-  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
   __ Ldr(
       caller_ctx,
       MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
@@ -5180,7 +5057,7 @@
   // Do the allocation of all three objects in one go. Assign this to x0, as it
   // will be returned to the caller.
   Register alloc_obj = x0;
-  __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+  __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
 
   // Get the arguments boilerplate from the current (global) context.
 
@@ -5364,20 +5241,20 @@
   // -----------------------------------
   __ AssertFunction(x1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make x2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ Mov(x2, fp);
-    __ B(&loop_entry);
-    __ Bind(&loop);
+  // Make x2 point to the JavaScript frame.
+  __ Mov(x2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-    __ Bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
     __ Cmp(x3, x1);
-    __ B(ne, &loop);
+    __ B(eq, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ Bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -5414,7 +5291,7 @@
   Label allocate, done_allocate;
   __ Mov(x1, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
   __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
-  __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+  __ Allocate(x1, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
   __ Bind(&done_allocate);
 
   // Compute arguments.length in x6.
@@ -5807,9 +5684,15 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
 
-  // FunctionCallbackArguments: context, callee and call data.
+  // FunctionCallbackArguments
+
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+
+  // context, callee and call data.
   __ Push(context, callee, call_data);
 
   if (!is_lazy()) {
@@ -5833,7 +5716,7 @@
 
   // Allocate the v8::Arguments structure in the arguments' space, since it's
   // not controlled by GC.
-  const int kApiStackSpace = 4;
+  const int kApiStackSpace = 3;
 
   // Allocate space so that CallApiFunctionAndReturn can store some scratch
   // registers on the stack.
@@ -5849,10 +5732,9 @@
   // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
   __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
   __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
-  // FunctionCallbackInfo::length_ = argc and
-  // FunctionCallbackInfo::is_construct_call = 0
+  // FunctionCallbackInfo::length_ = argc
   __ Mov(x10, argc());
-  __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
+  __ Str(x10, MemOperand(x0, 2 * kPointerSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5869,9 +5751,9 @@
   }
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  MemOperand is_construct_call_operand =
-      MemOperand(masm->StackPointer(), 4 * kPointerSize);
-  MemOperand* stack_space_operand = &is_construct_call_operand;
+  MemOperand length_operand =
+      MemOperand(masm->StackPointer(), 3 * kPointerSize);
+  MemOperand* stack_space_operand = &length_operand;
   stack_space = argc() + FCA::kArgsLength + 1;
   stack_space_operand = NULL;
 
@@ -5883,15 +5765,34 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- sp[0]                         : name
-  //  -- sp[8 .. (8 + kArgsLength*8)]  : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- x2                            : api_function_address
-  // -----------------------------------
+  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame, to make the GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
 
-  Register api_function_address = ApiGetterDescriptor::function_address();
-  DCHECK(api_function_address.is(x2));
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = x4;
+  Register scratch2 = x5;
+  Register scratch3 = x6;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+  __ Push(receiver);
+
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
+  __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ Push(scratch3, scratch, scratch, scratch2, holder);
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ Push(scratch);
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
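
A standalone sketch of the args_ array the asserts above describe, filled in the same order the stub pushes it (the stack grows down, so the receiver pushed first lands at the highest index):

#include <cstdio>

int main() {
  const char* args[7];
  args[6] = "receiver (kThisIndex)";            // __ Push(receiver)
  args[5] = "AccessorInfo::data (kDataIndex)";  // scratch3
  args[4] = "return value = undefined";
  args[3] = "return value default = undefined";
  args[2] = "isolate address";
  args[1] = "holder";
  args[0] = "should_throw_on_error = Smi(0), i.e. false";
  for (int i = 6; i >= 0; --i) std::printf("args_[%d]: %s\n", i, args[i]);
  std::printf("the name handle sits just below args_[0]\n");
  return 0;
}
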
@@ -5918,6 +5819,11 @@
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  Register api_function_address = x2;
+  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+  __ Ldr(api_function_address,
+         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
   const int spill_offset = 1 + kApiStackSpace;
   // +3 is to skip prolog, return address and name handle.
   MemOperand return_value_operand(
@@ -5927,7 +5833,6 @@
                            return_value_operand, NULL);
 }
 
-
 #undef __
 
 }  // namespace internal
diff --git a/src/arm64/codegen-arm64.cc b/src/arm64/codegen-arm64.cc
index c2073f1..990dd41 100644
--- a/src/arm64/codegen-arm64.cc
+++ b/src/arm64/codegen-arm64.cc
@@ -175,8 +175,8 @@
   Register map_root = array_size;
   __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
   __ SmiTag(x11, length);
-  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
 
   __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
@@ -184,18 +184,18 @@
                       OMIT_SMI_CHECK);
 
   // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Add(x10, array, kHeapObjectTag);
-  __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
-                      scratch, kLRHasBeenSaved, kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Move(x10, array);
+  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
+                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
 
   // Prepare for conversion loop.
   Register src_elements = x10;
   Register dst_elements = x11;
   Register dst_end = x12;
   __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
+  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
   __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
 
   FPRegister nan_d = d1;
@@ -282,8 +282,8 @@
   Register map_root = array_size;
   __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
   __ SmiTag(x11, length);
-  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
   Register src_elements = x10;
@@ -293,7 +293,7 @@
   __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
   __ Add(src_elements, elements,
          FixedDoubleArray::kHeaderSize - kHeapObjectTag);
-  __ Add(dst_elements, array, FixedArray::kHeaderSize);
+  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
   __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
 
   // Allocating heap numbers in the loop below can fail and cause a jump to
@@ -307,8 +307,7 @@
   __ Cmp(dst_elements, dst_end);
   __ B(lt, &initialization_loop);
 
-  __ Add(dst_elements, array, FixedArray::kHeaderSize);
-  __ Add(array, array, kHeapObjectTag);
+  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
 
   Register heap_num_map = x15;
   __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
diff --git a/src/arm64/cpu-arm64.cc b/src/arm64/cpu-arm64.cc
index 712dbbd..7c1084f 100644
--- a/src/arm64/cpu-arm64.cc
+++ b/src/arm64/cpu-arm64.cc
@@ -58,14 +58,16 @@
   __asm__ __volatile__ (  // NOLINT
     // Clean every line of the D cache containing the target data.
     "0:                                \n\t"
-    // dc      : Data Cache maintenance
-    //    c    : Clean
-    //     va  : by (Virtual) Address
-    //       u : to the point of Unification
-    // The point of unification for a processor is the point by which the
-    // instruction and data caches are guaranteed to see the same copy of a
-    // memory location. See ARM DDI 0406B page B2-12 for more information.
-    "dc   cvau, %[dline]                \n\t"
+    // dc       : Data Cache maintenance
+    //    c     : Clean
+    //     i    : Invalidate
+    //      va  : by (Virtual) Address
+    //        c : to the point of Coherency
+    // See ARM DDI 0406B page B2-12 for more information.
+    // We would prefer to use "cvau" (clean to the point of unification) here
+    // but we use "civac" to work around Cortex-A53 errata 819472, 826319,
+    // 827319 and 824069.
+    "dc   civac, %[dline]               \n\t"
     "add  %[dline], %[dline], %[dsize]  \n\t"
     "cmp  %[dline], %[end]              \n\t"
     "b.lt 0b                            \n\t"
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index f307aeb..d23eb58 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -48,16 +48,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return x1; }
-const Register InstanceOfDescriptor::RightRegister() { return x0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return x1; }
 const Register StringCompareDescriptor::RightRegister() { return x0; }
 
-
-const Register ApiGetterDescriptor::function_address() { return x2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return x0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
 
 const Register MathPowTaggedDescriptor::exponent() { return x11; }
 
@@ -68,6 +63,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return x0; }
+const Register HasPropertyDescriptor::KeyRegister() { return x3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -273,13 +270,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // register state
+  // x1: function
+  // x2: allocation site with elements kind
+  // x0: number of arguments to the constructor function
+  Register registers[] = {x1, x2, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // x1: function
@@ -348,6 +348,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -430,9 +435,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
-      kInterpreterDispatchTableRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -467,6 +471,15 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      x0,  // the value to pass to the generator
+      x1,  // the JSGeneratorObject to resume
+      x2   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 12ddd81..8a54e20 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1373,10 +1373,6 @@
       Mrs(fpcr, FPCR);
     }
 
-    // Settings overridden by ConfiugreFPCR():
-    //   - Assert that default-NaN mode is set.
-    Tbz(fpcr, DN_offset, &unexpected_mode);
-
     // Settings left to their default values:
     //   - Assert that flush-to-zero is not set.
     Tbnz(fpcr, FZ_offset, &unexpected_mode);
@@ -1393,31 +1389,13 @@
 }
 
 
-void MacroAssembler::ConfigureFPCR() {
-  UseScratchRegisterScope temps(this);
-  Register fpcr = temps.AcquireX();
-  Mrs(fpcr, FPCR);
-
-  // If necessary, enable default-NaN mode. The default values of the other FPCR
-  // options should be suitable, and AssertFPCRState will verify that.
-  Label no_write_required;
-  Tbnz(fpcr, DN_offset, &no_write_required);
-
-  Orr(fpcr, fpcr, DN_mask);
-  Msr(FPCR, fpcr);
-
-  Bind(&no_write_required);
-  AssertFPCRState(fpcr);
-}
-
-
 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                      const FPRegister& src) {
   AssertFPCRState();
 
-  // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
-  // for NaNs, which become the default NaN. We use fsub rather than fadd
-  // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
+  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+  // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
+  // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
   Fsub(dst, src, fp_zero);
 }
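
The signed-zero argument in the comment above can be checked directly; a minimal demonstration:

#include <cmath>
#include <cstdio>

int main() {
  double nz = -0.0;
  // fadd-style: -0.0 + 0.0 rounds to +0.0 under round-to-nearest, losing the
  // sign of zero.
  std::printf("-0.0 + 0.0 -> %s0\n", std::signbit(nz + 0.0) ? "-" : "+");
  // fsub-style: -0.0 - 0.0 stays -0.0, which is why Fsub is used here.
  std::printf("-0.0 - 0.0 -> %s0\n", std::signbit(nz - 0.0) ? "-" : "+");
  return 0;
}
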
 
@@ -1558,7 +1536,7 @@
                                                      Label* no_memento_found) {
   Label map_check;
   Label top_check;
-  ExternalReference new_space_allocation_top =
+  ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -1568,7 +1546,9 @@
   Add(scratch1, receiver, kMementoEndOffset);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  Eor(scratch2, scratch1, new_space_allocation_top);
+  Mov(scratch2, new_space_allocation_top_adr);
+  Ldr(scratch2, MemOperand(scratch2));
+  Eor(scratch2, scratch1, scratch2);
   Tst(scratch2, ~Page::kPageAlignmentMask);
   B(eq, &top_check);
   // The object is on a different page than allocation top. Bail out if the
@@ -1582,7 +1562,9 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  Cmp(scratch1, new_space_allocation_top);
+  Mov(scratch2, new_space_allocation_top_adr);
+  Ldr(scratch2, MemOperand(scratch2));
+  Cmp(scratch1, scratch2);
   B(gt, no_memento_found);
   // Memento map check.
   bind(&map_check);
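
The Eor/Tst pair above is the classic same-page test; in plain C++ (with an assumed page size, since Page::kPageAlignmentMask is defined elsewhere) it reads:

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative: mask covering the bits that address within one page.
  const uint64_t kPageAlignmentMask = (uint64_t{1} << 19) - 1;
  uint64_t memento_end = 0x7f0000012340;
  uint64_t new_space_top = 0x7f000007fff8;
  // Mirrors Eor + Tst(~mask): two addresses lie on the same page iff every
  // bit above the in-page offset matches.
  bool same_page = ((memento_end ^ new_space_top) & ~kPageAlignmentMask) == 0;
  std::printf("same page as top: %s\n", same_page ? "yes" : "no");
  return 0;
}
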
@@ -1659,6 +1641,17 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
+
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+
+    CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
+    Check(eq, kOperandIsNotAGeneratorObject);
+  }
+}
 
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
@@ -3041,6 +3034,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3101,14 +3095,16 @@
 
   // Calculate new top and bail out if new space is exhausted.
   Adds(result_end, result, object_size);
-  Ccmp(result_end, alloc_limit, CFlag, cc);
+  Ccmp(result_end, alloc_limit, NoFlag, cc);
   B(hi, gc_required);
-  Str(result_end, MemOperand(top_address));
 
-  // Tag the object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    ObjectTag(result, result);
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    Str(result_end, MemOperand(top_address));
   }
+
+  // Tag the object.
+  ObjectTag(result, result);
 }
 
 
@@ -3181,16 +3177,88 @@
     Check(eq, kUnalignedAllocationInNewSpace);
   }
 
-  Ccmp(result_end, alloc_limit, CFlag, cc);
+  Ccmp(result_end, alloc_limit, NoFlag, cc);
   B(hi, gc_required);
-  Str(result_end, MemOperand(top_address));
 
-  // Tag the object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    ObjectTag(result, result);
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    Str(result_end, MemOperand(top_address));
   }
+
+  // Tag the object.
+  ObjectTag(result, result);
 }
 
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register scratch1, Register scratch2,
+                                  AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+
+  DCHECK(!AreAliased(result, scratch1, scratch2));
+  DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK(0 == (object_size & kObjectAlignmentMask));
+
+  ExternalReference heap_allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up the allocation top address and result end registers.
+  Register top_address = scratch1;
+  Register result_end = scratch2;
+  Mov(top_address, Operand(heap_allocation_top));
+  Ldr(result, MemOperand(top_address));
+
+  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+  // the same alignment on ARM64.
+  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+  // Calculate new top and write it back.
+  Adds(result_end, result, object_size);
+  Str(result_end, MemOperand(top_address));
+
+  ObjectTag(result, result);
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, Register scratch,
+                                  AllocationFlags flags) {
+  // |object_size| and |result_end| may overlap; other registers must not.
+  DCHECK(!AreAliased(object_size, result, scratch));
+  DCHECK(!AreAliased(result_end, result, scratch));
+  DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
+         result_end.Is64Bits());
+
+  ExternalReference heap_allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up the allocation top address register.
+  Register top_address = scratch;
+  Mov(top_address, heap_allocation_top);
+  Ldr(result, MemOperand(top_address));
+
+  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+  // the same alignment on ARM64.
+  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+  // Calculate new top and write it back.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
+  } else {
+    Adds(result_end, result, object_size);
+  }
+  Str(result_end, MemOperand(top_address));
+
+  if (emit_debug_code()) {
+    Tst(result_end, kObjectAlignmentMask);
+    Check(eq, kUnalignedAllocationInNewSpace);
+  }
+
+  ObjectTag(result, result);
+}
 
 void MacroAssembler::AllocateTwoByteString(Register result,
                                            Register length,
@@ -3207,12 +3275,8 @@
   Bic(scratch1, scratch1, kObjectAlignmentMask);
 
   // Allocate two-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result,
@@ -3236,12 +3300,8 @@
   Bic(scratch1, scratch1, kObjectAlignmentMask);
 
   // Allocate one-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -3255,7 +3315,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result,
                       length,
@@ -3269,12 +3329,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -3288,7 +3344,7 @@
                                                  Label* gc_required) {
   DCHECK(!AreAliased(result, length, scratch1, scratch2));
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result,
                       length,
@@ -3305,7 +3361,7 @@
                                                  Label* gc_required) {
   DCHECK(!AreAliased(result, length, scratch1, scratch2));
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -3359,14 +3415,14 @@
   if (value.IsSameSizeAndType(heap_number_map)) {
     STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
                   HeapNumber::kValueOffset);
-    Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
+    Stp(heap_number_map, value,
+        FieldMemOperand(result, HeapObject::kMapOffset));
   } else {
-    Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+    Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
     if (value.IsValid()) {
-      Str(value, MemOperand(result, HeapNumber::kValueOffset));
+      Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
     }
   }
-  ObjectTag(result, result);
 }
 
 
@@ -3390,7 +3446,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 4b6b3c0..67e64f4 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -21,12 +21,15 @@
   #define ASM_UNIMPLEMENTED_BREAK(message)                                   \
   __ Debug(message, __LINE__,                                                \
            FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
-  #define ASM_LOCATION(message)                                              \
-  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#if DEBUG
+#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
 #else
-  #define ASM_UNIMPLEMENTED(message)
-  #define ASM_UNIMPLEMENTED_BREAK(message)
-  #define ASM_LOCATION(message)
+#define ASM_LOCATION(message)
+#endif
+#else
+#define ASM_UNIMPLEMENTED(message)
+#define ASM_UNIMPLEMENTED_BREAK(message)
+#define ASM_LOCATION(message)
 #endif
 
 
@@ -39,8 +42,8 @@
 #define kReturnRegister2 x2
 #define kJSFunctionRegister x1
 #define kContextRegister cp
+#define kAllocateSizeRegister x1
 #define kInterpreterAccumulatorRegister x0
-#define kInterpreterRegisterFileRegister x18
 #define kInterpreterBytecodeOffsetRegister x19
 #define kInterpreterBytecodeArrayRegister x20
 #define kInterpreterDispatchTableRegister x21
@@ -865,7 +868,6 @@
   inline void InitializeRootRegister();
 
   void AssertFPCRState(Register fpcr = NoReg);
-  void ConfigureFPCR();
   void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
   void CanonicalizeNaN(const FPRegister& reg) {
     CanonicalizeNaN(reg, reg);
@@ -970,6 +972,10 @@
   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
   void AssertFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSBoundFunction,
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
@@ -1306,7 +1312,6 @@
   //
   // If the new space is exhausted control continues at the gc_required label.
   // In this case, the result and scratch registers may still be clobbered.
-  // If flags includes TAG_OBJECT, the result is tagged as as a heap object.
   void Allocate(Register object_size, Register result, Register result_end,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
@@ -1317,6 +1322,15 @@
                 Label* gc_required,
                 AllocationFlags flags);
 
+  // FastAllocate is currently only used for folded allocations. It just
+  // increments the top pointer without checking against the limit. This can
+  // only be done if it was proved earlier that the allocation will succeed.
+  void FastAllocate(Register object_size, Register result, Register result_end,
+                    Register scratch, AllocationFlags flags);
+
+  void FastAllocate(int object_size, Register result, Register scratch1,
+                    Register scratch2, AllocationFlags flags);
+
   void AllocateTwoByteString(Register result,
                              Register length,
                              Register scratch1,
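
A much-simplified sketch of the folding protocol behind these declarations, under the assumption that a dominator first proves the combined size fits and each folded allocation then bumps the top unchecked. Names and the tagging constant are illustrative, not V8's exact code paths:

#include <cstdint>
#include <cstdio>

struct Space { uint32_t top, limit; };

// Dominator step: prove the whole folded group fits. It leaves top where it
// is (cf. "The top pointer is not updated for allocation folding dominators").
bool ReserveFolded(const Space& s, uint32_t total_size) {
  return s.top + total_size <= s.limit;  // on failure, jump to gc_required
}

// FastAllocate step: bump top without re-checking the limit; only sound
// after ReserveFolded succeeded for the combined size.
uint32_t FastAllocate(Space& s, uint32_t size) {
  uint32_t result = s.top;
  s.top += size;
  return result + 1;  // the +1 plays the role of kHeapObjectTag
}

int main() {
  Space s{0x1000, 0x2000};
  if (ReserveFolded(s, 16 + 32)) {
    uint32_t a = FastAllocate(s, 16);
    uint32_t b = FastAllocate(s, 32);
    std::printf("a=0x%x b=0x%x\n", a, b);
  }
  return 0;
}
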
diff --git a/src/arm64/simulator-arm64.h b/src/arm64/simulator-arm64.h
index 724c767..586f204 100644
--- a/src/arm64/simulator-arm64.h
+++ b/src/arm64/simulator-arm64.h
@@ -14,6 +14,7 @@
 #include "src/arm64/disasm-arm64.h"
 #include "src/arm64/instrument-arm64.h"
 #include "src/assembler.h"
+#include "src/base/compiler-specific.h"
 #include "src/globals.h"
 #include "src/utils.h"
 
@@ -794,7 +795,7 @@
   // Output stream.
   FILE* stream_;
   PrintDisassembler* print_disasm_;
-  void PRINTF_METHOD_CHECKING TraceSim(const char* format, ...);
+  void PRINTF_FORMAT(2, 3) TraceSim(const char* format, ...);
 
   // Instrumentation.
   Instrument* instrument_;
diff --git a/src/assembler.cc b/src/assembler.cc
index a912bb6..17cd56b 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -201,7 +201,6 @@
 
 void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
   if (size == 0) return;
-  if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
 
 #if defined(USE_SIMULATOR)
   Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
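
On real ARM64 hardware the non-simulator path ends in the maintenance loop from the cpu-arm64.cc hunk above. A standalone sketch of that loop follows (AArch64 GCC/Clang inline asm only; the function name and the fixed 64-byte line size are assumptions, and the real code derives the line size from CTR_EL0 and also synchronizes the instruction cache):

#include <cstddef>
#include <cstdint>

void FlushDCacheRange(void* start, size_t size) {
  const uintptr_t kLineSize = 64;  // assumed; production code reads CTR_EL0
  uintptr_t line = reinterpret_cast<uintptr_t>(start) & ~(kLineSize - 1);
  const uintptr_t end = reinterpret_cast<uintptr_t>(start) + size;
  for (; line < end; line += kLineSize) {
    // Clean & invalidate by VA to the point of coherency; "civac" rather
    // than "cvau" is the Cortex-A53 errata workaround described above.
    __asm__ volatile("dc civac, %0" : : "r"(line) : "memory");
  }
  __asm__ volatile("dsb ish" : : : "memory");  // complete the maintenance ops
}
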
@@ -514,7 +513,8 @@
     if (RelocInfo::IsComment(rmode)) {
       WriteData(rinfo->data());
     } else if (RelocInfo::IsConstPool(rmode) ||
-               RelocInfo::IsVeneerPool(rmode)) {
+               RelocInfo::IsVeneerPool(rmode) ||
+               RelocInfo::IsDeoptId(rmode)) {
       WriteIntData(static_cast<int>(rinfo->data()));
     }
   }
@@ -705,7 +705,8 @@
             Advance(kIntSize);
           }
         } else if (RelocInfo::IsConstPool(rmode) ||
-                   RelocInfo::IsVeneerPool(rmode)) {
+                   RelocInfo::IsVeneerPool(rmode) ||
+                   RelocInfo::IsDeoptId(rmode)) {
           if (SetMode(rmode)) {
             AdvanceReadInt();
             return;
@@ -828,6 +829,8 @@
       return "encoded internal reference";
     case DEOPT_REASON:
       return "deopt reason";
+    case DEOPT_ID:
+      return "deopt index";
     case CONST_POOL:
       return "constant pool";
     case VENEER_POOL:
@@ -846,6 +849,8 @@
       return "generator continuation";
     case WASM_MEMORY_REFERENCE:
       return "wasm memory reference";
+    case WASM_MEMORY_SIZE_REFERENCE:
+      return "wasm memory size reference";
     case NUMBER_OF_MODES:
     case PC_JUMP:
       UNREACHABLE();
@@ -933,6 +938,7 @@
     case STATEMENT_POSITION:
     case EXTERNAL_REFERENCE:
     case DEOPT_REASON:
+    case DEOPT_ID:
     case CONST_POOL:
     case VENEER_POOL:
     case DEBUG_BREAK_SLOT_AT_POSITION:
@@ -941,6 +947,7 @@
     case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
     case GENERATOR_CONTINUATION:
     case WASM_MEMORY_REFERENCE:
+    case WASM_MEMORY_SIZE_REFERENCE:
     case NONE32:
     case NONE64:
       break;
@@ -1072,6 +1079,12 @@
   return ExternalReference(isolate->interpreter()->dispatch_table_address());
 }
 
+ExternalReference ExternalReference::interpreter_dispatch_counters(
+    Isolate* isolate) {
+  return ExternalReference(
+      isolate->interpreter()->bytecode_dispatch_counters_table());
+}
+
 ExternalReference::ExternalReference(StatsCounter* counter)
   : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
 
@@ -1255,6 +1268,26 @@
       Redirect(isolate, FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
 }
 
+ExternalReference ExternalReference::wasm_word32_ctz(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::word32_ctz_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word64_ctz(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::word64_ctz_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word32_popcnt(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::word32_popcnt_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word64_popcnt(Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
+}
+
 static void f64_acos_wrapper(double* param) { *param = std::acos(*param); }
 
 ExternalReference ExternalReference::f64_acos_wrapper_function(
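
The ctz/popcnt wrappers above exist because not every target has these operations as single instructions; portable fallbacks look roughly like this sketch (the signatures are illustrative, not wasm's actual wrapper ABI):

#include <cstdint>
#include <cstdio>

uint32_t word32_ctz(uint32_t v) {
  if (v == 0) return 32;  // ctz(0) is defined as the bit width
  uint32_t n = 0;
  while ((v & 1) == 0) {
    v >>= 1;
    ++n;
  }
  return n;
}

uint32_t word32_popcnt(uint32_t v) {
  uint32_t n = 0;
  for (; v != 0; v &= v - 1) ++n;  // clear the lowest set bit each step
  return n;
}

int main() {
  std::printf("ctz(0x28)=%u popcnt(0x28)=%u\n", word32_ctz(0x28),
              word32_popcnt(0x28));
  return 0;
}
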
@@ -2023,12 +2056,12 @@
 
 // Platform specific but identical code for all the platforms.
 
-
-void Assembler::RecordDeoptReason(const int reason, int raw_position) {
+void Assembler::RecordDeoptReason(const int reason, int raw_position, int id) {
   if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::POSITION, raw_position);
     RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
+    RecordRelocInfo(RelocInfo::DEOPT_ID, id);
   }
 }
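
Note the symmetry this change maintains: DEOPT_ID entries are written as ints (WriteIntData, in the assembler.cc hunks above) and must be consumed as ints (AdvanceReadInt), or the reloc stream desynchronizes. A toy model of that contract, with a deliberately simplified format that is not V8's encoding:

#include <cstdio>
#include <vector>

enum Mode { DEOPT_REASON, DEOPT_ID, CONST_POOL };

struct Entry {
  Mode mode;
  int data;
};

int main() {
  // Writer side: reason and id are emitted back to back, both as ints.
  std::vector<Entry> stream = {{DEOPT_REASON, 4}, {DEOPT_ID, 17}};
  // Reader side: must agree that DEOPT_ID carries an int payload.
  for (const Entry& e : stream) {
    if (e.mode == DEOPT_ID) std::printf("deopt index %d\n", e.data);
  }
  return 0;
}
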
 
diff --git a/src/assembler.h b/src/assembler.h
index 192d16b..353abdb 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -384,9 +384,10 @@
     CODE_TARGET_WITH_ID,
     DEBUGGER_STATEMENT,  // Code target for the debugger statement.
     EMBEDDED_OBJECT,
-    CELL,
     // To relocate pointers into the wasm memory embedded in wasm code
     WASM_MEMORY_REFERENCE,
+    WASM_MEMORY_SIZE_REFERENCE,
+    CELL,
 
     // Everything after runtime_entry (inclusive) is not GC'ed.
     RUNTIME_ENTRY,
@@ -415,6 +416,7 @@
     VENEER_POOL,
 
     DEOPT_REASON,  // Deoptimization reason index.
+    DEOPT_ID,      // Deoptimization inlining id.
 
     // This is not an actual reloc mode, but used to encode a long pc jump that
     // cannot be encoded as part of another record.
@@ -430,7 +432,7 @@
     FIRST_REAL_RELOC_MODE = CODE_TARGET,
     LAST_REAL_RELOC_MODE = VENEER_POOL,
     LAST_CODE_ENUM = DEBUGGER_STATEMENT,
-    LAST_GCED_ENUM = WASM_MEMORY_REFERENCE,
+    LAST_GCED_ENUM = WASM_MEMORY_SIZE_REFERENCE,
     FIRST_SHAREABLE_RELOC_MODE = CELL,
   };
 
@@ -475,6 +477,9 @@
   static inline bool IsDeoptReason(Mode mode) {
     return mode == DEOPT_REASON;
   }
+  static inline bool IsDeoptId(Mode mode) {
+    return mode == DEOPT_ID;
+  }
   static inline bool IsPosition(Mode mode) {
     return mode == POSITION || mode == STATEMENT_POSITION;
   }
@@ -521,6 +526,9 @@
   static inline bool IsWasmMemoryReference(Mode mode) {
     return mode == WASM_MEMORY_REFERENCE;
   }
+  static inline bool IsWasmMemorySizeReference(Mode mode) {
+    return mode == WASM_MEMORY_SIZE_REFERENCE;
+  }
   static inline int ModeMask(Mode mode) { return 1 << mode; }
 
   // Accessors
@@ -547,6 +555,12 @@
   // constant pool, otherwise the pointer is embedded in the instruction stream.
   bool IsInConstantPool();
 
+  Address wasm_memory_reference();
+  uint32_t wasm_memory_size_reference();
+  void update_wasm_memory_reference(
+      Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+      ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH);
+
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
   INLINE(Address target_address());
@@ -581,10 +595,6 @@
                                 ICacheFlushMode icache_flush_mode =
                                     FLUSH_ICACHE_IF_NEEDED));
 
-  INLINE(Address wasm_memory_reference());
-  INLINE(void update_wasm_memory_reference(
-      Address old_base, Address new_base, size_t old_size, size_t new_size,
-      ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH));
   // Returns the address of the constant pool entry where the target address
   // is held.  This should only be called if IsInConstantPool returns true.
   INLINE(Address constant_pool_entry_address());
@@ -631,6 +641,8 @@
   INLINE(void WipeOut());
 
   template<typename StaticVisitor> inline void Visit(Heap* heap);
+
+  template <typename ObjectVisitor>
   inline void Visit(Isolate* isolate, ObjectVisitor* v);
 
   // Check whether this debug break slot has been patched with a call to the
@@ -909,6 +921,7 @@
   // ExternalReferenceTable in serialize.cc manually.
 
   static ExternalReference interpreter_dispatch_table_address(Isolate* isolate);
+  static ExternalReference interpreter_dispatch_counters(Isolate* isolate);
 
   static ExternalReference incremental_marking_record_write_function(
       Isolate* isolate);
@@ -948,6 +961,10 @@
   static ExternalReference wasm_int64_mod(Isolate* isolate);
   static ExternalReference wasm_uint64_div(Isolate* isolate);
   static ExternalReference wasm_uint64_mod(Isolate* isolate);
+  static ExternalReference wasm_word32_ctz(Isolate* isolate);
+  static ExternalReference wasm_word64_ctz(Isolate* isolate);
+  static ExternalReference wasm_word32_popcnt(Isolate* isolate);
+  static ExternalReference wasm_word64_popcnt(Isolate* isolate);
 
   static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
   static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
diff --git a/src/ast/ast-expression-rewriter.cc b/src/ast/ast-expression-rewriter.cc
index edee91d..b39f7f1 100644
--- a/src/ast/ast-expression-rewriter.cc
+++ b/src/ast/ast-expression-rewriter.cc
@@ -169,12 +169,10 @@
 
 
 void AstExpressionRewriter::VisitForOfStatement(ForOfStatement* node) {
-  AST_REWRITE_PROPERTY(Expression, node, each);
   AST_REWRITE_PROPERTY(Expression, node, assign_iterator);
   AST_REWRITE_PROPERTY(Expression, node, next_result);
   AST_REWRITE_PROPERTY(Expression, node, result_done);
   AST_REWRITE_PROPERTY(Expression, node, assign_each);
-  AST_REWRITE_PROPERTY(Expression, node, subject);
   AST_REWRITE_PROPERTY(Statement, node, body);
 }
 
diff --git a/src/ast/ast-expression-visitor.cc b/src/ast/ast-expression-visitor.cc
index dbf4ea4..91d4afb 100644
--- a/src/ast/ast-expression-visitor.cc
+++ b/src/ast/ast-expression-visitor.cc
@@ -170,8 +170,6 @@
 
 
 void AstExpressionVisitor::VisitForOfStatement(ForOfStatement* stmt) {
-  RECURSE(Visit(stmt->iterable()));
-  RECURSE(Visit(stmt->each()));
   RECURSE(Visit(stmt->assign_iterator()));
   RECURSE(Visit(stmt->next_result()));
   RECURSE(Visit(stmt->result_done()));
diff --git a/src/ast/ast-expression-visitor.h b/src/ast/ast-expression-visitor.h
index 545a45c..283bc7b 100644
--- a/src/ast/ast-expression-visitor.h
+++ b/src/ast/ast-expression-visitor.h
@@ -27,10 +27,10 @@
   virtual void VisitExpression(Expression* expression) = 0;
   int depth() { return depth_; }
 
- private:
   void VisitDeclarations(ZoneList<Declaration*>* d) override;
   void VisitStatements(ZoneList<Statement*>* s) override;
 
+ private:
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
 #define DECLARE_VISIT(type) void Visit##type(type* node) override;
diff --git a/src/ast/ast-numbering.cc b/src/ast/ast-numbering.cc
index f54333f..bd96026 100644
--- a/src/ast/ast-numbering.cc
+++ b/src/ast/ast-numbering.cc
@@ -17,6 +17,7 @@
         isolate_(isolate),
         zone_(zone),
         next_id_(BailoutId::FirstUsable().ToInt()),
+        yield_count_(0),
         properties_(zone),
         slot_cache_(zone),
         dont_optimize_reason_(kNoReason) {
@@ -31,8 +32,6 @@
   AST_NODE_LIST(DEFINE_VISIT)
 #undef DEFINE_VISIT
 
-  bool Finish(FunctionLiteral* node);
-
   void VisitVariableProxyReference(VariableProxy* node);
   void VisitPropertyReference(Property* node);
   void VisitReference(Expression* expr);
@@ -76,6 +75,7 @@
   Isolate* isolate_;
   Zone* zone_;
   int next_id_;
+  int yield_count_;
   AstProperties properties_;
   // The slot cache allows us to reuse certain feedback vector slots.
   FeedbackVectorSlotCache slot_cache_;
@@ -217,6 +217,8 @@
 
 
 void AstNumberingVisitor::VisitYield(Yield* node) {
+  node->set_yield_id(yield_count_);
+  yield_count_++;
   IncrementNodeCount();
   DisableOptimization(kYield);
   ReserveFeedbackSlots(node);
@@ -284,8 +286,10 @@
   IncrementNodeCount();
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
+  node->set_first_yield_id(yield_count_);
   Visit(node->body());
   Visit(node->cond());
+  node->set_yield_count(yield_count_ - node->first_yield_id());
 }
 
 
@@ -293,8 +297,10 @@
   IncrementNodeCount();
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
+  node->set_first_yield_id(yield_count_);
   Visit(node->cond());
   Visit(node->body());
+  node->set_yield_count(yield_count_ - node->first_yield_id());
 }
 
 
@@ -377,9 +383,11 @@
   IncrementNodeCount();
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
+  Visit(node->enumerable());  // Not part of loop.
+  node->set_first_yield_id(yield_count_);
   Visit(node->each());
-  Visit(node->enumerable());
   Visit(node->body());
+  node->set_yield_count(yield_count_ - node->first_yield_id());
   ReserveFeedbackSlots(node);
 }
 
@@ -388,11 +396,13 @@
   IncrementNodeCount();
   DisableCrankshaft(kForOfStatement);
   node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
-  Visit(node->assign_iterator());
+  Visit(node->assign_iterator());  // Not part of loop.
+  node->set_first_yield_id(yield_count_);
   Visit(node->next_result());
   Visit(node->result_done());
   Visit(node->assign_each());
   Visit(node->body());
+  node->set_yield_count(yield_count_ - node->first_yield_id());
   ReserveFeedbackSlots(node);
 }
 
@@ -440,10 +450,12 @@
   IncrementNodeCount();
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
-  if (node->init() != NULL) Visit(node->init());
+  if (node->init() != NULL) Visit(node->init());  // Not part of loop.
+  node->set_first_yield_id(yield_count_);
   if (node->cond() != NULL) Visit(node->cond());
   if (node->next() != NULL) Visit(node->next());
   Visit(node->body());
+  node->set_yield_count(yield_count_ - node->first_yield_id());
 }
 
 
@@ -554,13 +566,6 @@
 }
 
 
-bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
-  node->set_ast_properties(&properties_);
-  node->set_dont_optimize_reason(dont_optimize_reason());
-  return !HasStackOverflow();
-}
-
-
 bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   Scope* scope = node->scope();
   if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
@@ -577,7 +582,10 @@
   VisitDeclarations(scope->declarations());
   VisitStatements(node->body());
 
-  return Finish(node);
+  node->set_ast_properties(&properties_);
+  node->set_dont_optimize_reason(dont_optimize_reason());
+  node->set_yield_count(yield_count_);
+  return !HasStackOverflow();
 }
 
 
diff --git a/src/ast/ast-numbering.h b/src/ast/ast-numbering.h
index 0ac1ef2..a1d3137 100644
--- a/src/ast/ast-numbering.h
+++ b/src/ast/ast-numbering.h
@@ -14,11 +14,27 @@
 class Zone;
 
 namespace AstNumbering {
-// Assign type feedback IDs and bailout IDs to an AST node tree.
-//
+// Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
+// tree.
 bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
 }
 
+// Some details on yield IDs
+// -------------------------
+//
+// In order to assist Ignition in generating bytecode for a generator function,
+// we assign a unique number (the yield ID) to each Yield node in its AST. We
+// also annotate loops with the number of yields they contain (loop.yield_count)
+// and the smallest ID of those (loop.first_yield_id), and we annotate the
+// function itself with the number of yields it contains (function.yield_count).
+//
+// We choose the IDs simply by enumerating the Yield nodes in visitation order.
+// Ignition relies on the following properties:
+// - For each loop l and each yield y of l:
+//     l.first_yield_id  <=  y.yield_id  <  l.first_yield_id + l.yield_count
+// - For the generator function f itself and each yield y of f:
+//                    0  <=  y.yield_id  <  f.yield_count
+
 }  // namespace internal
 }  // namespace v8
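
A tiny sketch of the scheme just described: yields are numbered in visitation order, and each loop records the first ID seen inside it plus a count, so the half-open range [first_yield_id, first_yield_id + yield_count) covers exactly its own yields. Toy structures, not V8's AST:

#include <cstdio>

int yield_count = 0;  // the visitor-wide counter

struct Loop {
  int first_yield_id = 0;
  int count = 0;
};

int VisitYield() { return yield_count++; }  // assign the next ID

void VisitLoop(Loop& loop, int yields_in_body) {
  loop.first_yield_id = yield_count;          // set_first_yield_id
  for (int i = 0; i < yields_in_body; ++i) VisitYield();
  loop.count = yield_count - loop.first_yield_id;  // set_yield_count
}

int main() {
  VisitYield();     // a yield before the loop gets ID 0
  Loop l;
  VisitLoop(l, 2);  // IDs 1 and 2 belong to the loop
  std::printf("loop: [%d, %d)  function: [0, %d)\n", l.first_yield_id,
              l.first_yield_id + l.count, yield_count);
  return 0;
}
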
 
diff --git a/src/ast/ast-type-bounds.h b/src/ast/ast-type-bounds.h
new file mode 100644
index 0000000..ec26fdf
--- /dev/null
+++ b/src/ast/ast-type-bounds.h
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A container to associate type bounds with AST Expression nodes.
+
+#ifndef V8_AST_AST_TYPE_BOUNDS_H_
+#define V8_AST_AST_TYPE_BOUNDS_H_
+
+#include "src/types.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Expression;
+
+class AstTypeBounds {
+ public:
+  explicit AstTypeBounds(Zone* zone) : bounds_map_(zone) {}
+  ~AstTypeBounds() {}
+
+  Bounds get(Expression* expression) const {
+    ZoneMap<Expression*, Bounds>::const_iterator i =
+        bounds_map_.find(expression);
+    return (i != bounds_map_.end()) ? i->second : Bounds::Unbounded();
+  }
+
+  void set(Expression* expression, Bounds bounds) {
+    bounds_map_[expression] = bounds;
+  }
+
+ private:
+  ZoneMap<Expression*, Bounds> bounds_map_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_AST_AST_TYPE_BOUNDS_H_
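
A usage sketch of this side-table pattern, as a standalone analogue built on std::map (the Bounds and Expression stand-ins are placeholders):

#include <cstdio>
#include <map>

struct Expression {};                 // stand-in for the AST node type
struct Bounds { const char* desc; };  // stand-in for src/types.h Bounds

class TypeBoundsTable {
 public:
  Bounds get(Expression* e) const {
    auto it = map_.find(e);
    // Nodes that were never annotated read back as unbounded.
    return it != map_.end() ? it->second : Bounds{"unbounded"};
  }
  void set(Expression* e, Bounds b) { map_[e] = b; }

 private:
  std::map<Expression*, Bounds> map_;
};

int main() {
  Expression annotated, untouched;
  TypeBoundsTable bounds;
  bounds.set(&annotated, Bounds{"Smi..Number"});
  std::printf("annotated: %s, untouched: %s\n", bounds.get(&annotated).desc,
              bounds.get(&untouched).desc);
  return 0;
}

The payoff shows up in the ast.h hunk further down, which deletes the per-node bounds_ field: every Expression shrinks, and only the passes that actually compute bounds pay for the map.
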
diff --git a/src/ast/ast-value-factory.h b/src/ast/ast-value-factory.h
index 8b3f0ed..041581b 100644
--- a/src/ast/ast-value-factory.h
+++ b/src/ast/ast-value-factory.h
@@ -242,6 +242,8 @@
 #define STRING_CONSTANTS(F)                     \
   F(anonymous_function, "(anonymous function)") \
   F(arguments, "arguments")                     \
+  F(async, "async")                             \
+  F(await, "await")                             \
   F(constructor, "constructor")                 \
   F(default, "default")                         \
   F(done, "done")                               \
diff --git a/src/ast/ast.cc b/src/ast/ast.cc
index e8b6269..7c83e3b 100644
--- a/src/ast/ast.cc
+++ b/src/ast/ast.cc
@@ -813,6 +813,291 @@
   }
 }
 
+// ----------------------------------------------------------------------------
+// Implementation of AstTraversalVisitor
+
+AstTraversalVisitor::AstTraversalVisitor(Isolate* isolate) {
+  InitializeAstVisitor(isolate);
+}
+
+void AstTraversalVisitor::VisitVariableDeclaration(VariableDeclaration* decl) {}
+
+void AstTraversalVisitor::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  Visit(decl->fun());
+}
+
+void AstTraversalVisitor::VisitImportDeclaration(ImportDeclaration* decl) {}
+
+void AstTraversalVisitor::VisitExportDeclaration(ExportDeclaration* decl) {}
+
+void AstTraversalVisitor::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0; i < stmts->length(); ++i) {
+    Statement* stmt = stmts->at(i);
+    Visit(stmt);
+    if (stmt->IsJump()) break;
+  }
+}
+
+void AstTraversalVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
+  for (int i = 0; i < expressions->length(); i++) {
+    Expression* expression = expressions->at(i);
+    if (expression != NULL) Visit(expression);
+  }
+}
+
+void AstTraversalVisitor::VisitBlock(Block* stmt) {
+  VisitStatements(stmt->statements());
+}
+
+void AstTraversalVisitor::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+void AstTraversalVisitor::VisitEmptyStatement(EmptyStatement* stmt) {}
+
+void AstTraversalVisitor::VisitSloppyBlockFunctionStatement(
+    SloppyBlockFunctionStatement* stmt) {
+  Visit(stmt->statement());
+}
+
+void AstTraversalVisitor::VisitIfStatement(IfStatement* stmt) {
+  Visit(stmt->condition());
+  Visit(stmt->then_statement());
+  Visit(stmt->else_statement());
+}
+
+void AstTraversalVisitor::VisitContinueStatement(ContinueStatement* stmt) {}
+
+void AstTraversalVisitor::VisitBreakStatement(BreakStatement* stmt) {}
+
+void AstTraversalVisitor::VisitReturnStatement(ReturnStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+void AstTraversalVisitor::VisitWithStatement(WithStatement* stmt) {
+  Visit(stmt->expression());
+  Visit(stmt->statement());
+}
+
+void AstTraversalVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
+  Visit(stmt->tag());
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+
+  for (int i = 0; i < clauses->length(); ++i) {
+    CaseClause* clause = clauses->at(i);
+    if (!clause->is_default()) {
+      Expression* label = clause->label();
+      Visit(label);
+    }
+    ZoneList<Statement*>* stmts = clause->statements();
+    VisitStatements(stmts);
+  }
+}
+
+void AstTraversalVisitor::VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
+
+void AstTraversalVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  Visit(stmt->body());
+  Visit(stmt->cond());
+}
+
+void AstTraversalVisitor::VisitWhileStatement(WhileStatement* stmt) {
+  Visit(stmt->cond());
+  Visit(stmt->body());
+}
+
+void AstTraversalVisitor::VisitForStatement(ForStatement* stmt) {
+  if (stmt->init() != NULL) {
+    Visit(stmt->init());
+  }
+  if (stmt->cond() != NULL) {
+    Visit(stmt->cond());
+  }
+  if (stmt->next() != NULL) {
+    Visit(stmt->next());
+  }
+  Visit(stmt->body());
+}
+
+void AstTraversalVisitor::VisitForInStatement(ForInStatement* stmt) {
+  Visit(stmt->enumerable());
+  Visit(stmt->body());
+}
+
+void AstTraversalVisitor::VisitForOfStatement(ForOfStatement* stmt) {
+  Visit(stmt->assign_iterator());
+  Visit(stmt->next_result());
+  Visit(stmt->result_done());
+  Visit(stmt->assign_each());
+  Visit(stmt->body());
+}
+
+void AstTraversalVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  Visit(stmt->try_block());
+  Visit(stmt->catch_block());
+}
+
+void AstTraversalVisitor::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  Visit(stmt->try_block());
+  Visit(stmt->finally_block());
+}
+
+void AstTraversalVisitor::VisitDebuggerStatement(DebuggerStatement* stmt) {}
+
+void AstTraversalVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Scope* scope = expr->scope();
+  VisitDeclarations(scope->declarations());
+  VisitStatements(expr->body());
+}
+
+void AstTraversalVisitor::VisitNativeFunctionLiteral(
+    NativeFunctionLiteral* expr) {}
+
+void AstTraversalVisitor::VisitDoExpression(DoExpression* expr) {
+  VisitBlock(expr->block());
+  VisitVariableProxy(expr->result());
+}
+
+void AstTraversalVisitor::VisitConditional(Conditional* expr) {
+  Visit(expr->condition());
+  Visit(expr->then_expression());
+  Visit(expr->else_expression());
+}
+
+void AstTraversalVisitor::VisitVariableProxy(VariableProxy* expr) {}
+
+void AstTraversalVisitor::VisitLiteral(Literal* expr) {}
+
+void AstTraversalVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {}
+
+void AstTraversalVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
+  ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+  for (int i = 0; i < props->length(); ++i) {
+    ObjectLiteralProperty* prop = props->at(i);
+    if (!prop->key()->IsLiteral()) {
+      Visit(prop->key());
+    }
+    Visit(prop->value());
+  }
+}
+
+void AstTraversalVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
+  ZoneList<Expression*>* values = expr->values();
+  for (int i = 0; i < values->length(); ++i) {
+    Expression* value = values->at(i);
+    Visit(value);
+  }
+}
+
+void AstTraversalVisitor::VisitAssignment(Assignment* expr) {
+  Visit(expr->target());
+  Visit(expr->value());
+}
+
+void AstTraversalVisitor::VisitYield(Yield* expr) {
+  Visit(expr->generator_object());
+  Visit(expr->expression());
+}
+
+void AstTraversalVisitor::VisitThrow(Throw* expr) { Visit(expr->exception()); }
+
+void AstTraversalVisitor::VisitProperty(Property* expr) {
+  Visit(expr->obj());
+  Visit(expr->key());
+}
+
+void AstTraversalVisitor::VisitCall(Call* expr) {
+  Visit(expr->expression());
+  ZoneList<Expression*>* args = expr->arguments();
+  for (int i = 0; i < args->length(); ++i) {
+    Expression* arg = args->at(i);
+    Visit(arg);
+  }
+}
+
+void AstTraversalVisitor::VisitCallNew(CallNew* expr) {
+  Visit(expr->expression());
+  ZoneList<Expression*>* args = expr->arguments();
+  for (int i = 0; i < args->length(); ++i) {
+    Expression* arg = args->at(i);
+    Visit(arg);
+  }
+}
+
+void AstTraversalVisitor::VisitCallRuntime(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  for (int i = 0; i < args->length(); ++i) {
+    Expression* arg = args->at(i);
+    Visit(arg);
+  }
+}
+
+void AstTraversalVisitor::VisitUnaryOperation(UnaryOperation* expr) {
+  Visit(expr->expression());
+}
+
+void AstTraversalVisitor::VisitCountOperation(CountOperation* expr) {
+  Visit(expr->expression());
+}
+
+void AstTraversalVisitor::VisitBinaryOperation(BinaryOperation* expr) {
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+void AstTraversalVisitor::VisitCompareOperation(CompareOperation* expr) {
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+void AstTraversalVisitor::VisitThisFunction(ThisFunction* expr) {}
+
+void AstTraversalVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
+  for (int i = 0; i < decls->length(); ++i) {
+    Declaration* decl = decls->at(i);
+    Visit(decl);
+  }
+}
+
+void AstTraversalVisitor::VisitClassLiteral(ClassLiteral* expr) {
+  if (expr->extends() != nullptr) {
+    Visit(expr->extends());
+  }
+  Visit(expr->constructor());
+  ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+  for (int i = 0; i < props->length(); ++i) {
+    ObjectLiteralProperty* prop = props->at(i);
+    if (!prop->key()->IsLiteral()) {
+      Visit(prop->key());
+    }
+    Visit(prop->value());
+  }
+}
+
+void AstTraversalVisitor::VisitSpread(Spread* expr) {
+  Visit(expr->expression());
+}
+
+void AstTraversalVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {}
+
+void AstTraversalVisitor::VisitSuperPropertyReference(
+    SuperPropertyReference* expr) {
+  VisitVariableProxy(expr->this_var());
+  Visit(expr->home_object());
+}
+
+void AstTraversalVisitor::VisitSuperCallReference(SuperCallReference* expr) {
+  VisitVariableProxy(expr->this_var());
+  VisitVariableProxy(expr->new_target_var());
+  VisitVariableProxy(expr->this_function_var());
+}
+
+void AstTraversalVisitor::VisitRewritableExpression(
+    RewritableExpression* expr) {
+  Visit(expr->expression());
+}
+
 CaseClause::CaseClause(Zone* zone, Expression* label,
                        ZoneList<Statement*>* statements, int pos)
     : Expression(zone, pos),
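
The traversal pattern above, reduced to a standalone sketch: a visitor whose default action for every node is to recurse into its children, so a client overrides only the node kinds it cares about. Toy node type, not V8's AST classes:

#include <cstdio>
#include <cstring>
#include <vector>

struct Node {
  std::vector<Node*> children;
  const char* kind;
};

// Base visitor: the default for every node is "visit the subtrees", just as
// each AstTraversalVisitor::Visit* above only descends into child nodes.
struct TraversalVisitor {
  virtual ~TraversalVisitor() = default;
  virtual void Visit(Node* n) {
    for (Node* child : n->children) Visit(child);
  }
};

// A client adds behavior for the nodes it cares about and delegates the rest.
struct YieldCounter : TraversalVisitor {
  int count = 0;
  void Visit(Node* n) override {
    if (std::strcmp(n->kind, "yield") == 0) ++count;
    TraversalVisitor::Visit(n);  // keep walking the subtree
  }
};

int main() {
  Node yield{{}, "yield"};
  Node body{{&yield}, "block"};
  Node function{{&body}, "function"};
  YieldCounter counter;
  counter.Visit(&function);
  std::printf("yields found: %d\n", counter.count);
  return 0;
}
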
diff --git a/src/ast/ast.h b/src/ast/ast.h
index 52bac8e..bee0bab 100644
--- a/src/ast/ast.h
+++ b/src/ast/ast.h
@@ -337,10 +337,6 @@
   // True iff the expression is a valid target for an assignment.
   bool IsValidReferenceExpressionOrThis() const;
 
-  // Expression type bounds
-  Bounds bounds() const { return bounds_; }
-  void set_bounds(Bounds bounds) { bounds_ = bounds; }
-
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
@@ -374,7 +370,6 @@
   Expression(Zone* zone, int pos)
       : AstNode(pos),
         base_id_(BailoutId::None().ToInt()),
-        bounds_(Bounds::Unbounded()),
         bit_field_(0) {}
   static int parent_num_ids() { return 0; }
   void set_to_boolean_types(uint16_t types) {
@@ -390,7 +385,6 @@
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   int base_id_;
-  Bounds bounds_;
   class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
   uint16_t bit_field_;
   // Ends with 16-bit field; deriving classes in turn begin with
@@ -598,7 +592,7 @@
   ImportDeclaration(Zone* zone, VariableProxy* proxy,
                     const AstRawString* import_name,
                     const AstRawString* module_specifier, Scope* scope, int pos)
-      : Declaration(zone, proxy, IMPORT, scope, pos),
+      : Declaration(zone, proxy, CONST, scope, pos),
         import_name_(import_name),
         module_specifier_(module_specifier) {}
 
@@ -647,6 +641,13 @@
   Statement* body() const { return body_; }
   void set_body(Statement* s) { body_ = s; }
 
+  int yield_count() const { return yield_count_; }
+  int first_yield_id() const { return first_yield_id_; }
+  void set_yield_count(int yield_count) { yield_count_ = yield_count; }
+  void set_first_yield_id(int first_yield_id) {
+    first_yield_id_ = first_yield_id;
+  }
+
   static int num_ids() { return parent_num_ids() + 1; }
   BailoutId OsrEntryId() const { return BailoutId(local_id(0)); }
   virtual BailoutId ContinueId() const = 0;
@@ -658,7 +659,9 @@
  protected:
   IterationStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
       : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
-        body_(NULL) {}
+        body_(NULL),
+        yield_count_(0),
+        first_yield_id_(0) {}
   static int parent_num_ids() { return BreakableStatement::num_ids(); }
   void Initialize(Statement* body) { body_ = body; }
 
@@ -667,6 +670,8 @@
 
   Statement* body_;
   Label continue_target_;
+  int yield_count_;
+  int first_yield_id_;
 };
 
 
@@ -779,17 +784,7 @@
     ITERATE      // for (each of subject) body;
   };
 
-  void Initialize(Expression* each, Expression* subject, Statement* body) {
-    IterationStatement::Initialize(body);
-    each_ = each;
-    subject_ = subject;
-  }
-
-  Expression* each() const { return each_; }
-  Expression* subject() const { return subject_; }
-
-  void set_each(Expression* e) { each_ = e; }
-  void set_subject(Expression* e) { subject_ = e; }
+  using IterationStatement::Initialize;
 
   static const char* VisitModeString(VisitMode mode) {
     return mode == ITERATE ? "for-of" : "for-in";
@@ -797,11 +792,7 @@
 
  protected:
   ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
-      : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
-
- private:
-  Expression* each_;
-  Expression* subject_;
+      : IterationStatement(zone, labels, pos) {}
 };
 
 
@@ -809,10 +800,22 @@
  public:
   DECLARE_NODE_TYPE(ForInStatement)
 
+  void Initialize(Expression* each, Expression* subject, Statement* body) {
+    ForEachStatement::Initialize(body);
+    each_ = each;
+    subject_ = subject;
+  }
+
   Expression* enumerable() const {
     return subject();
   }
 
+  Expression* each() const { return each_; }
+  Expression* subject() const { return subject_; }
+
+  void set_each(Expression* e) { each_ = e; }
+  void set_subject(Expression* e) { subject_ = e; }
+
   // Type feedback information.
   void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache) override;
@@ -838,12 +841,17 @@
 
  protected:
   ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
-      : ForEachStatement(zone, labels, pos), for_in_type_(SLOW_FOR_IN) {}
+      : ForEachStatement(zone, labels, pos),
+        each_(nullptr),
+        subject_(nullptr),
+        for_in_type_(SLOW_FOR_IN) {}
   static int parent_num_ids() { return ForEachStatement::num_ids(); }
 
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
+  Expression* each_;
+  Expression* subject_;
   ForInType for_in_type_;
   FeedbackVectorSlot each_slot_;
   FeedbackVectorSlot for_in_feedback_slot_;
@@ -854,15 +862,10 @@
  public:
   DECLARE_NODE_TYPE(ForOfStatement)
 
-  void Initialize(Expression* each,
-                  Expression* subject,
-                  Statement* body,
-                  Variable* iterator,
-                  Expression* assign_iterator,
-                  Expression* next_result,
-                  Expression* result_done,
-                  Expression* assign_each) {
-    ForEachStatement::Initialize(each, subject, body);
+  void Initialize(Statement* body, Variable* iterator,
+                  Expression* assign_iterator, Expression* next_result,
+                  Expression* result_done, Expression* assign_each) {
+    ForEachStatement::Initialize(body);
     iterator_ = iterator;
     assign_iterator_ = assign_iterator;
     next_result_ = next_result;
@@ -870,10 +873,6 @@
     assign_each_ = assign_each;
   }
 
-  Expression* iterable() const {
-    return subject();
-  }
-
   Variable* iterator() const {
     return iterator_;
   }
@@ -2526,20 +2525,24 @@
 
   Expression* generator_object() const { return generator_object_; }
   Expression* expression() const { return expression_; }
+  int yield_id() const { return yield_id_; }
 
   void set_generator_object(Expression* e) { generator_object_ = e; }
   void set_expression(Expression* e) { expression_ = e; }
+  void set_yield_id(int yield_id) { yield_id_ = yield_id; }
 
  protected:
   Yield(Zone* zone, Expression* generator_object, Expression* expression,
         int pos)
       : Expression(zone, pos),
         generator_object_(generator_object),
-        expression_(expression) {}
+        expression_(expression),
+        yield_id_(-1) {}
 
  private:
   Expression* generator_object_;
   Expression* expression_;
+  int yield_id_;
 };
 
 
@@ -2584,12 +2587,12 @@
   int start_position() const;
   int end_position() const;
   int SourceSize() const { return end_position() - start_position(); }
-  bool is_declaration() const { return IsDeclaration::decode(bitfield_); }
+  bool is_declaration() const { return function_type() == kDeclaration; }
   bool is_named_expression() const {
-    return IsNamedExpression::decode(bitfield_);
+    return function_type() == kNamedExpression;
   }
   bool is_anonymous_expression() const {
-    return IsAnonymousExpression::decode(bitfield_);
+    return function_type() == kAnonymousExpression;
   }
   LanguageMode language_mode() const;
 
@@ -2666,6 +2669,9 @@
     bitfield_ = ShouldBeUsedOnceHint::update(bitfield_, true);
   }
 
+  FunctionType function_type() const {
+    return FunctionTypeBits::decode(bitfield_);
+  }
   FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
 
   int ast_node_count() { return ast_properties_.node_count(); }
@@ -2686,6 +2692,9 @@
     return is_anonymous_expression();
   }
 
+  int yield_count() { return yield_count_; }
+  void set_yield_count(int yield_count) { yield_count_ = yield_count; }
+
  protected:
   FunctionLiteral(Zone* zone, const AstString* name,
                   AstValueFactory* ast_value_factory, Scope* scope,
@@ -2705,12 +2714,10 @@
         materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
         parameter_count_(parameter_count),
-        function_token_position_(RelocInfo::kNoPosition) {
+        function_token_position_(RelocInfo::kNoPosition),
+        yield_count_(0) {
     bitfield_ =
-        IsDeclaration::encode(function_type == kDeclaration) |
-        IsNamedExpression::encode(function_type == kNamedExpression) |
-        IsAnonymousExpression::encode(function_type == kAnonymousExpression) |
-        Pretenure::encode(false) |
+        FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
         HasDuplicateParameters::encode(has_duplicate_parameters ==
                                        kHasDuplicateParameters) |
         IsFunction::encode(is_function) |
@@ -2720,15 +2727,13 @@
   }
 
  private:
-  class IsDeclaration : public BitField16<bool, 0, 1> {};
-  class IsNamedExpression : public BitField16<bool, 1, 1> {};
-  class IsAnonymousExpression : public BitField16<bool, 2, 1> {};
-  class Pretenure : public BitField16<bool, 3, 1> {};
-  class HasDuplicateParameters : public BitField16<bool, 4, 1> {};
-  class IsFunction : public BitField16<bool, 5, 1> {};
-  class ShouldEagerCompile : public BitField16<bool, 6, 1> {};
-  class ShouldBeUsedOnceHint : public BitField16<bool, 7, 1> {};
-  class FunctionKindBits : public BitField16<FunctionKind, 8, 8> {};
+  class FunctionTypeBits : public BitField16<FunctionType, 0, 2> {};
+  class Pretenure : public BitField16<bool, 2, 1> {};
+  class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
+  class IsFunction : public BitField16<bool, 4, 1> {};
+  class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
+  class ShouldBeUsedOnceHint : public BitField16<bool, 6, 1> {};
+  class FunctionKindBits : public BitField16<FunctionKind, 7, 9> {};
 
   // Start with 16-bit field, which should get packed together
   // with Expression's trailing 16-bit field.
@@ -2746,6 +2751,7 @@
   int expected_property_count_;
   int parameter_count_;
   int function_token_position_;
+  int yield_count_;
 };
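
As an aside on the bitfield consolidation above, here is a minimal sketch of BitField16-style packing; this is a simplified stand-in, not V8's actual template. The change collapses three former one-bit flags into a single two-bit FunctionType field, freeing a bit for the wider FunctionKindBits.

#include <cassert>
#include <cstdint>

// Simplified stand-in for V8's BitField16: encode/decode a value at a
// fixed shift/size inside a shared 16-bit word.
template <class T, int shift, int size>
struct BitField16 {
  static const uint16_t kMask =
      static_cast<uint16_t>(((1u << size) - 1) << shift);
  static uint16_t encode(T value) {
    assert(static_cast<uint32_t>(value) < (1u << size));
    return static_cast<uint16_t>(static_cast<uint32_t>(value) << shift);
  }
  static T decode(uint16_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
};

enum FunctionType { kAnonymousExpression, kNamedExpression, kDeclaration };

using FunctionTypeBits = BitField16<FunctionType, 0, 2>;  // 2 bits, 3 values
using Pretenure = BitField16<bool, 2, 1>;

int main() {
  uint16_t bits =
      FunctionTypeBits::encode(kDeclaration) | Pretenure::encode(true);
  assert(FunctionTypeBits::decode(bits) == kDeclaration);
  assert(Pretenure::decode(bits));
  return 0;
}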
 
 
@@ -3041,6 +3047,30 @@
 
 
 // ----------------------------------------------------------------------------
+// Traversing visitor
+// - fully traverses the entire AST.
+
+class AstTraversalVisitor : public AstVisitor {
+ public:
+  explicit AstTraversalVisitor(Isolate* isolate);
+  virtual ~AstTraversalVisitor() {}
+
+  // Iteration left-to-right.
+  void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+  void VisitStatements(ZoneList<Statement*>* statements) override;
+  void VisitExpressions(ZoneList<Expression*>* expressions) override;
+
+// Individual nodes
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+  DISALLOW_COPY_AND_ASSIGN(AstTraversalVisitor);
+};
+
+// ----------------------------------------------------------------------------
 // AstNode factory
 
 class AstNodeFactory final BASE_EMBEDDED {
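
AstTraversalVisitor's contract is that every Visit method recurses into all of its children, so subclasses override only the node types they care about. A generic sketch of that pattern, using hypothetical node types rather than V8's AST classes:

#include <memory>
#include <vector>

struct Yield;
struct Block;

struct Visitor {
  virtual ~Visitor() = default;
  virtual void VisitYield(Yield& node);
  virtual void VisitBlock(Block& node);
};

struct Node {
  virtual ~Node() = default;
  virtual void Accept(Visitor& v) = 0;
};
struct Yield : Node {
  void Accept(Visitor& v) override { v.VisitYield(*this); }
};
struct Block : Node {
  std::vector<std::unique_ptr<Node>> statements;
  void Accept(Visitor& v) override { v.VisitBlock(*this); }
};

void Visitor::VisitYield(Yield&) {}
void Visitor::VisitBlock(Block& node) {
  for (auto& s : node.statements) s->Accept(*this);  // full traversal
}

// A subclass that counts yields in one left-to-right pass, analogous to
// how per-function yield counts could be collected with such a visitor.
struct YieldCounter : Visitor {
  int count = 0;
  void VisitYield(Yield&) override { ++count; }
};
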
diff --git a/src/ast/prettyprinter.cc b/src/ast/prettyprinter.cc
index 2a79049..49bff08 100644
--- a/src/ast/prettyprinter.cc
+++ b/src/ast/prettyprinter.cc
@@ -192,10 +192,11 @@
 
 
 void CallPrinter::VisitForOfStatement(ForOfStatement* node) {
-  Find(node->each());
   Find(node->assign_iterator());
-  Find(node->body());
   Find(node->next_result());
+  Find(node->result_done());
+  Find(node->assign_each());
+  Find(node->body());
 }
 
 
@@ -675,11 +676,37 @@
 
 
 void PrettyPrinter::VisitForOfStatement(ForOfStatement* node) {
+  // TODO(adamk): ForOf is largely desugared as part of parsing,
+  // so it's hard to display useful stuff here. Should likely
+  // either bite the bullet and display less or try harder
+  // to preserve more.
   PrintLabels(node->labels());
-  Print("for (");
-  Visit(node->each());
-  Print(" of ");
-  Visit(node->iterable());
+  // The <each> is embedded inside a do-expression by the time we get here.
+  Print("for (<each> of ");
+  if (node->assign_iterator()->IsAssignment() &&
+      node->assign_iterator()->AsAssignment()->value()->IsCall() &&
+      node->assign_iterator()
+          ->AsAssignment()
+          ->value()
+          ->AsCall()
+          ->expression()
+          ->IsProperty()) {
+    Visit(node->assign_iterator()
+              ->AsAssignment()
+              ->value()
+              ->AsCall()
+              ->expression()
+              ->AsProperty()
+              ->obj());
+  } else {
+    Print("<iterable>");
+  }
   Print(") ");
   Visit(node->body());
 }
@@ -1055,7 +1082,8 @@
     if (object->IsJSFunction()) {
       Print("JS-Function");
     } else if (object->IsJSArray()) {
-      Print("JS-array[%u]", JSArray::cast(object)->length());
+      Print("JS-array[%u]",
+            Smi::cast(JSArray::cast(object)->length())->value());
     } else if (object->IsJSObject()) {
       Print("JS-Object");
     } else {
@@ -1145,7 +1173,7 @@
   for (int i = 0; i < indent_; i++) {
     Print(". ");
   }
-  Print(txt);
+  Print("%s", txt);
 }
 
 
@@ -1191,6 +1219,10 @@
 const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
   Init();
   { IndentedScope indent(this, "FUNC", program->position());
+    PrintIndented("KIND");
+    Print(" %d\n", program->kind());
+    PrintIndented("YIELD COUNT");
+    Print(" %d\n", program->yield_count());
     PrintLiteralIndented("NAME", program->name(), true);
     PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
     PrintParameters(program->scope());
@@ -1359,6 +1391,8 @@
 
 void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
   IndentedScope indent(this, "DO", node->position());
+  PrintIndented("YIELD COUNT");
+  Print(" %d\n", node->yield_count());
   PrintLabelsIndented(node->labels());
   PrintIndentedVisit("BODY", node->body());
   PrintIndentedVisit("COND", node->cond());
@@ -1367,6 +1401,8 @@
 
 void AstPrinter::VisitWhileStatement(WhileStatement* node) {
   IndentedScope indent(this, "WHILE", node->position());
+  PrintIndented("YIELD COUNT");
+  Print(" %d\n", node->yield_count());
   PrintLabelsIndented(node->labels());
   PrintIndentedVisit("COND", node->cond());
   PrintIndentedVisit("BODY", node->body());
@@ -1375,6 +1411,8 @@
 
 void AstPrinter::VisitForStatement(ForStatement* node) {
   IndentedScope indent(this, "FOR", node->position());
+  PrintIndented("YIELD COUNT");
+  Print(" %d\n", node->yield_count());
   PrintLabelsIndented(node->labels());
   if (node->init()) PrintIndentedVisit("INIT", node->init());
   if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -1385,6 +1423,8 @@
 
 void AstPrinter::VisitForInStatement(ForInStatement* node) {
   IndentedScope indent(this, "FOR IN", node->position());
+  PrintIndented("YIELD COUNT");
+  Print(" %d\n", node->yield_count());
   PrintIndentedVisit("FOR", node->each());
   PrintIndentedVisit("IN", node->enumerable());
   PrintIndentedVisit("BODY", node->body());
@@ -1393,13 +1433,13 @@
 
 void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
   IndentedScope indent(this, "FOR OF", node->position());
-  PrintIndentedVisit("FOR", node->each());
-  PrintIndentedVisit("OF", node->iterable());
-  PrintIndentedVisit("BODY", node->body());
+  PrintIndented("YIELD COUNT");
+  Print(" %d\n", node->yield_count());
   PrintIndentedVisit("INIT", node->assign_iterator());
   PrintIndentedVisit("NEXT", node->next_result());
-  PrintIndentedVisit("EACH", node->assign_each());
   PrintIndentedVisit("DONE", node->result_done());
+  PrintIndentedVisit("EACH", node->assign_each());
+  PrintIndentedVisit("BODY", node->body());
 }
 
 
@@ -1522,7 +1562,7 @@
   if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
   buf[i] = '\0';
   PrintIndented("FLAGS ");
-  Print(buf.start());
+  Print("%s", buf.start());
   Print("\n");
 }
 
@@ -1594,7 +1634,9 @@
 
 
 void AstPrinter::VisitYield(Yield* node) {
-  IndentedScope indent(this, "YIELD", node->position());
+  EmbeddedVector<char, 128> buf;
+  SNPrintF(buf, "YIELD id %d", node->yield_id());
+  IndentedScope indent(this, buf.start(), node->position());
   Visit(node->expression());
 }
 
diff --git a/src/ast/prettyprinter.h b/src/ast/prettyprinter.h
index 0186203..4e90294 100644
--- a/src/ast/prettyprinter.h
+++ b/src/ast/prettyprinter.h
@@ -7,6 +7,7 @@
 
 #include "src/allocation.h"
 #include "src/ast/ast.h"
+#include "src/base/compiler-specific.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +21,7 @@
   // string. The result string is alive as long as the CallPrinter is alive.
   const char* Print(FunctionLiteral* program, int position);
 
-  void Print(const char* format, ...);
+  void PRINTF_FORMAT(2, 3) Print(const char* format, ...);
 
   void Find(AstNode* node, bool print = false);
 
@@ -62,7 +63,7 @@
   const char* PrintExpression(FunctionLiteral* program);
   const char* PrintProgram(FunctionLiteral* program);
 
-  void Print(const char* format, ...);
+  void PRINTF_FORMAT(2, 3) Print(const char* format, ...);
 
   // Print a node to stdout.
   static void PrintOut(Isolate* isolate, AstNode* node);
diff --git a/src/ast/scopeinfo.cc b/src/ast/scopeinfo.cc
index 4ffc020..10315aa 100644
--- a/src/ast/scopeinfo.cc
+++ b/src/ast/scopeinfo.cc
@@ -438,16 +438,13 @@
   return ContextLocalMaybeAssignedFlag::decode(value);
 }
 
-
-bool ScopeInfo::LocalIsSynthetic(int var) {
-  DCHECK(0 <= var && var < LocalCount());
+bool ScopeInfo::VariableIsSynthetic(String* name) {
   // There's currently no flag stored on the ScopeInfo to indicate that a
   // variable is a compiler-introduced temporary. However, to avoid conflict
   // with user declarations, the current temporaries like .generator_object and
   // .result start with a dot, so we can use that as a flag. It's a hack!
-  Handle<String> name(LocalName(var));
-  return (name->length() > 0 && name->Get(0) == '.') ||
-         name->Equals(*GetIsolate()->factory()->this_string());
+  return name->length() == 0 || name->Get(0) == '.' ||
+         name->Equals(name->GetHeap()->this_string());
 }
 
 
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index 5d4b809..beffa53 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -175,7 +175,10 @@
   asm_module_ = false;
   asm_function_ = outer_scope != NULL && outer_scope->asm_module_;
   // Inherit the language mode from the parent scope.
-  language_mode_ = outer_scope != NULL ? outer_scope->language_mode_ : SLOPPY;
+  language_mode_ =
+      is_module_scope()
+          ? STRICT
+          : (outer_scope != NULL ? outer_scope->language_mode_ : SLOPPY);
   outer_scope_calls_sloppy_eval_ = false;
   inner_scope_calls_eval_ = false;
   scope_nonlinear_ = false;
@@ -193,6 +196,7 @@
   scope_info_ = scope_info;
   start_position_ = RelocInfo::kNoPosition;
   end_position_ = RelocInfo::kNoPosition;
+  is_hidden_ = false;
   if (!scope_info.is_null()) {
     scope_calls_eval_ = scope_info->CallsEval();
     language_mode_ = scope_info->language_mode();
@@ -287,6 +291,7 @@
                                : FLAG_print_scopes) {
     scope->Print();
   }
+  scope->CheckScopePositions();
 #endif
 
   info->set_scope(scope);
@@ -553,17 +558,19 @@
   return var;
 }
 
-
-bool Scope::RemoveTemporary(Variable* var) {
+int Scope::RemoveTemporary(Variable* var) {
+  DCHECK_NOT_NULL(var);
   // Most likely (always?) any temporary variable we want to remove
   // was just added before, so we search backwards.
   for (int i = temps_.length(); i-- > 0;) {
     if (temps_[i] == var) {
-      temps_.Remove(i);
-      return true;
+      // Don't shrink temps_, as callers of this method expect
+      // the returned indices to be unique per-scope.
+      temps_[i] = nullptr;
+      return i;
     }
   }
-  return false;
+  return -1;
 }
 
 
@@ -630,6 +637,7 @@
   // context as a whole has forced context allocation.
   for (int i = 0; i < temps_.length(); i++) {
     Variable* var = temps_[i];
+    if (var == nullptr) continue;
     if (var->is_used()) {
       if (var->IsContextSlot()) {
         DCHECK(has_forced_context_allocation());
@@ -953,6 +961,8 @@
   if (is_strict(language_mode())) {
     Indent(n1, "// strict mode scope\n");
   }
+  if (asm_module_) Indent(n1, "// scope is an asm module\n");
+  if (asm_function_) Indent(n1, "// scope is an asm function\n");
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
   if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
@@ -979,9 +989,15 @@
   }
 
   if (temps_.length() > 0) {
-    Indent(n1, "// temporary vars:\n");
+    bool printed_header = false;
     for (int i = 0; i < temps_.length(); i++) {
-      PrintVar(n1, temps_[i]);
+      if (temps_[i] != nullptr) {
+        if (!printed_header) {
+          printed_header = true;
+          Indent(n1, "// temporary vars:\n");
+        }
+        PrintVar(n1, temps_[i]);
+      }
     }
   }
 
@@ -1007,6 +1023,16 @@
 
   Indent(n0, "}\n");
 }
+
+void Scope::CheckScopePositions() {
+  // A scope is allowed to have invalid positions if it is hidden and has no
+  // inner scopes.
+  if (!is_hidden() && inner_scopes_.length() == 0) {
+    CHECK_NE(RelocInfo::kNoPosition, start_position());
+    CHECK_NE(RelocInfo::kNoPosition, end_position());
+  }
+  for (Scope* scope : inner_scopes_) scope->CheckScopePositions();
+}
 #endif  // DEBUG
 
 
@@ -1083,12 +1109,15 @@
     if (var != NULL && proxy->is_assigned()) var->set_maybe_assigned();
     *binding_kind = DYNAMIC_LOOKUP;
     return NULL;
-  } else if (calls_sloppy_eval() && !is_script_scope() &&
-             name_can_be_shadowed) {
+  } else if (calls_sloppy_eval() && is_declaration_scope() &&
+             !is_script_scope() && name_can_be_shadowed) {
     // A variable binding may have been found in an outer scope, but the current
     // scope makes a sloppy 'eval' call, so the found variable may not be
     // the correct one (the 'eval' may introduce a binding with the same name).
     // In that case, change the lookup result to reflect this situation.
+    // Only scopes that can host var bindings (declaration scopes) need be
+    // considered here (this excludes block and catch scopes), and variable
+    // lookups at script scope are always dynamic.
     if (*binding_kind == BOUND) {
       *binding_kind = BOUND_EVAL_SHADOWED;
     } else if (*binding_kind == UNBOUND) {
@@ -1398,6 +1427,7 @@
 void Scope::AllocateNonParameterLocalsAndDeclaredGlobals(Isolate* isolate) {
   // All variables that have no rewrite yet are non-parameter locals.
   for (int i = 0; i < temps_.length(); i++) {
+    if (temps_[i] == nullptr) continue;
     AllocateNonParameterLocal(isolate, temps_[i]);
   }
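
RemoveTemporary() now leaves a null tombstone instead of shrinking the list, which is why the loops above skip nullptr entries. As a sketch, the same pattern over a std::vector:

#include <vector>

// Null out the slot instead of erasing it, so indices returned earlier
// stay valid; all later iteration must skip null entries.
template <typename T>
int RemoveStable(std::vector<T*>& items, T* item) {
  for (int i = static_cast<int>(items.size()); i-- > 0;) {  // search backwards
    if (items[i] != item) continue;
    items[i] = nullptr;
    return i;
  }
  return -1;  // not found
}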
 
diff --git a/src/ast/scopes.h b/src/ast/scopes.h
index dae70c0..d767a33 100644
--- a/src/ast/scopes.h
+++ b/src/ast/scopes.h
@@ -209,7 +209,9 @@
 
   // Remove a temporary variable. This is for adjusting the scope of
   // temporaries used when desugaring parameter initializers.
-  bool RemoveTemporary(Variable* var);
+  // Returns the index at which it was found in this scope, or -1 if
+  // it was not found.
+  int RemoveTemporary(Variable* var);
 
   // Adds a temporary variable in this scope's TemporaryScope. This is for
   // adjusting the scope of temporaries used when desugaring parameter
@@ -243,6 +245,7 @@
 
   // Set the language mode flag (unless disabled by a global flag).
   void SetLanguageMode(LanguageMode language_mode) {
+    DCHECK(!is_module_scope() || is_strict(language_mode));
     language_mode_ = language_mode;
   }
 
@@ -295,6 +298,10 @@
     end_position_ = statement_pos;
   }
 
+  // Scopes created for desugaring are hidden, i.e. not visible to the debugger.
+  bool is_hidden() const { return is_hidden_; }
+  void set_is_hidden() { is_hidden_ = true; }
+
   // In some cases we want to force context allocation for a whole scope.
   void ForceContextAllocation() {
     DCHECK(!already_resolved());
@@ -574,6 +581,9 @@
 
 #ifdef DEBUG
   void Print(int n = 0);  // n = indentation; n < 0 => don't print recursively
+
+  // Check that the scope has positions assigned.
+  void CheckScopePositions();
 #endif
 
   // ---------------------------------------------------------------------------
@@ -597,7 +607,9 @@
   // variables may be implicitly 'declared' by being used (possibly in
   // an inner scope) with no intervening with statements or eval calls.
   VariableMap variables_;
-  // Compiler-allocated (user-invisible) temporaries.
+  // Compiler-allocated (user-invisible) temporaries. Due to the implementation
+  // of RemoveTemporary(), may contain nulls, which must be skipped over during
+  // allocation and printing.
   ZoneList<Variable*> temps_;
   // Parameter list in source order.
   ZoneList<Variable*> params_;
@@ -645,6 +657,7 @@
   // Source positions.
   int start_position_;
   int end_position_;
+  bool is_hidden_;
 
   // Computed via PropagateScopeInfo.
   bool outer_scope_calls_sloppy_eval_;
diff --git a/src/ast/variables.cc b/src/ast/variables.cc
index 7b9a5d2..2950db4 100644
--- a/src/ast/variables.cc
+++ b/src/ast/variables.cc
@@ -19,7 +19,6 @@
     case CONST_LEGACY: return "CONST_LEGACY";
     case LET: return "LET";
     case CONST: return "CONST";
-    case IMPORT: return "IMPORT";
     case DYNAMIC: return "DYNAMIC";
     case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
     case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
index 92929cf..c44ad85 100644
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -18,7 +18,6 @@
   V(kAPICallReturnedInvalidObject, "API call returned invalid object")         \
   V(kArgumentsObjectValueInATestContext,                                       \
     "Arguments object value in a test context")                                \
-  V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed")      \
   V(kArrayIndexConstantValueTooBig, "Array index constant value too big")      \
   V(kAssignmentToArguments, "Assignment to arguments")                         \
   V(kAssignmentToLetVariableBeforeInitialization,                              \
@@ -62,8 +61,8 @@
   V(kEmitLoadRegisterUnsupportedDoubleImmediate,                               \
     "EmitLoadRegister: Unsupported double immediate")                          \
   V(kEval, "eval")                                                             \
-  V(kExpectedAlignmentMarker, "Expected alignment marker")                     \
   V(kExpectedAllocationSite, "Expected allocation site")                       \
+  V(kExpectedBooleanValue, "Expected boolean value")                           \
   V(kExpectedFunctionObject, "Expected function object in register")           \
   V(kExpectedHeapNumber, "Expected HeapNumber")                                \
   V(kExpectedNativeContext, "Expected native context")                         \
@@ -79,17 +78,15 @@
   V(kForInStatementWithNonLocalEachVariable,                                   \
     "ForInStatement with non-local each variable")                             \
   V(kForOfStatement, "ForOfStatement")                                         \
-  V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned")            \
   V(kFunctionBeingDebugged, "Function is being debugged")                      \
   V(kFunctionCallsEval, "Function calls eval")                                 \
   V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,                      \
     "The function_data field should be a BytecodeArray on interpreter entry")  \
   V(kGeneratedCodeIsTooLarge, "Generated code is too large")                   \
-  V(kGeneratorFailedToResume, "Generator failed to resume")                    \
-  V(kGeneratorResumeMethod, "Generator resume method is being called")         \
   V(kGenerator, "Generator")                                                   \
   V(kGlobalFunctionsMustHaveInitialMap,                                        \
     "Global functions must have initial map")                                  \
+  V(kGraphBuildingFailed, "Optimized graph construction failed")               \
   V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered")       \
   V(kHydrogenFilter, "Optimization disabled by filter")                        \
   V(kImportDeclaration, "Import declaration")                                  \
@@ -102,15 +99,22 @@
   V(kInteger32ToSmiFieldWritingToNonSmiLocation,                               \
     "Integer32ToSmiField writing to non-smi location")                         \
   V(kInvalidBytecode, "Invalid bytecode")                                      \
-  V(kInvalidCaptureReferenced, "Invalid capture referenced")                   \
   V(kInvalidElementsKindForInternalArrayOrInternalPackedArray,                 \
     "Invalid ElementsKind for InternalArray or InternalPackedArray")           \
+  V(kInvalidFrameForFastNewRestArgumentsStub,                                  \
+    "Invalid frame for FastNewRestArgumentsStub")                              \
+  V(kInvalidFrameForFastNewSloppyArgumentsStub,                                \
+    "Invalid frame for FastNewSloppyArgumentsStub")                            \
+  V(kInvalidFrameForFastNewStrictArgumentsStub,                                \
+    "Invalid frame for FastNewStrictArgumentsStub")                            \
   V(kInvalidFullCodegenState, "invalid full-codegen state")                    \
   V(kInvalidHandleScopeLevel, "Invalid HandleScope level")                     \
+  V(kInvalidJumpTableIndex, "Invalid jump table index")                        \
   V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment")  \
   V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment")     \
   V(kInvalidLhsInCountOperation, "Invalid lhs in count operation")             \
   V(kInvalidMinLength, "Invalid min_length")                                   \
+  V(kInvalidRegisterFileInGenerator, "invalid register file in generator")     \
   V(kJSGlobalObjectNativeContextShouldBeANativeContext,                        \
     "JSGlobalObject::native_context should be a native context")               \
   V(kJSGlobalProxyContextShouldNotBeNull,                                      \
@@ -118,7 +122,6 @@
   V(kJSObjectWithFastElementsMapHasSlowElements,                               \
     "JSObject with fast elements map has slow elements")                       \
   V(kLetBindingReInitialization, "Let binding re-initialization")              \
-  V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size")  \
   V(kLiveEdit, "LiveEdit")                                                     \
   V(kLookupVariableInCountOperation, "Lookup variable in count operation")     \
   V(kMapBecameDeprecated, "Map became deprecated")                             \
@@ -143,13 +146,15 @@
   V(kOperandIsASmiAndNotABoundFunction,                                        \
     "Operand is a smi and not a bound function")                               \
   V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function")      \
+  V(kOperandIsASmiAndNotAGeneratorObject,                                      \
+    "Operand is a smi and not a generator object")                             \
   V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name")              \
   V(kOperandIsASmiAndNotAReceiver, "Operand is a smi and not a receiver")      \
   V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string")          \
   V(kOperandIsASmi, "Operand is a smi")                                        \
-  V(kOperandIsNotADate, "Operand is not a date")                               \
   V(kOperandIsNotABoundFunction, "Operand is not a bound function")            \
   V(kOperandIsNotAFunction, "Operand is not a function")                       \
+  V(kOperandIsNotAGeneratorObject, "Operand is not a generator object")        \
   V(kOperandIsNotAName, "Operand is not a name")                               \
   V(kOperandIsNotANumber, "Operand is not a number")                           \
   V(kOperandIsNotAReceiver, "Operand is not a receiver")                       \
@@ -159,7 +164,8 @@
   V(kOperandNotANumber, "Operand not a number")                                \
   V(kObjectTagged, "The object is tagged")                                     \
   V(kObjectNotTagged, "The object is not tagged")                              \
-  V(kOptimizationDisabled, "Optimization is disabled")                         \
+  V(kOptimizationDisabled, "Optimization disabled")                            \
+  V(kOptimizationDisabledForTest, "Optimization disabled for test")            \
   V(kOptimizedTooManyTimes, "Optimized too many times")                        \
   V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister,                   \
     "Out of virtual registers while trying to allocate temp register")         \
@@ -185,8 +191,6 @@
   V(kTailCall, "Tail call")                                                    \
   V(kTheCurrentStackPointerIsBelowCsp,                                         \
     "The current stack pointer is below csp")                                  \
-  V(kTheSourceAndDestinationAreTheSame,                                        \
-    "The source and destination are the same")                                 \
   V(kTheStackWasCorruptedByMacroAssemblerCall,                                 \
     "The stack was corrupted by MacroAssembler::Call()")                       \
   V(kTooManyParametersLocals, "Too many parameters/locals")                    \
@@ -228,8 +232,6 @@
   V(kUnexpectedLevelAfterReturnFromApiCall,                                    \
     "Unexpected level after return from api call")                             \
   V(kUnexpectedNegativeValue, "Unexpected negative value")                     \
-  V(kUnexpectedNumberOfPreAllocatedPropertyFields,                             \
-    "Unexpected number of pre-allocated property fields")                      \
   V(kUnexpectedFunctionIDForInvokeIntrinsic,                                   \
     "Unexpected runtime function id for the InvokeIntrinsic bytecode")         \
   V(kUnexpectedFPCRMode, "Unexpected FPCR mode.")                              \
@@ -252,8 +254,6 @@
   V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")         \
   V(kUnsupportedPhiUseOfConstVariable,                                         \
     "Unsupported phi use of const or let variable")                            \
-  V(kUnexpectedReturnFromBytecodeHandler,                                      \
-    "Unexpectedly returned from a bytecode handler")                           \
   V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw")          \
   V(kUnsupportedSwitchStatement, "Unsupported switch statement")               \
   V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate")               \
diff --git a/src/base.isolate b/src/base.isolate
index b51de01..592e4f6 100644
--- a/src/base.isolate
+++ b/src/base.isolate
@@ -4,7 +4,7 @@
 {
   'includes': [
     '../third_party/icu/icu.isolate',
-    '../build/config/win/msvs_dependencies.isolate',
+    '../gypfiles/config/win/msvs_dependencies.isolate',
   ],
   'conditions': [
     ['use_custom_libcxx==1', {
@@ -22,13 +22,6 @@
         ],
       },
     }],
-    ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and v8_separate_ignition_snapshot==1', {
-      'variables': {
-        'files': [
-          '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
-        ],
-      },
-    }],
     ['OS=="linux" and component=="shared_library" and target_arch=="ia32"', {
       'variables': {
         'files': [
diff --git a/src/atomic-utils.h b/src/base/atomic-utils.h
similarity index 98%
rename from src/atomic-utils.h
rename to src/base/atomic-utils.h
index 34e1cb0..ac90fd9 100644
--- a/src/atomic-utils.h
+++ b/src/base/atomic-utils.h
@@ -11,7 +11,7 @@
 #include "src/base/macros.h"
 
 namespace v8 {
-namespace internal {
+namespace base {
 
 template <class T>
 class AtomicNumber {
@@ -169,7 +169,7 @@
   base::AtomicWord bits_;
 };
 
-}  // namespace internal
+}  // namespace base
 }  // namespace v8
 
 #endif  // #define V8_ATOMIC_UTILS_H_
diff --git a/src/base/bits.cc b/src/base/bits.cc
index 74d747f..9b949cc 100644
--- a/src/base/bits.cc
+++ b/src/base/bits.cc
@@ -7,6 +7,7 @@
 #include <limits>
 
 #include "src/base/logging.h"
+#include "src/base/safe_math.h"
 
 namespace v8 {
 namespace base {
@@ -48,6 +49,35 @@
   return lhs % rhs;
 }
 
+
+int64_t FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value) {
+  if (value.IsValid())
+    return value.ValueUnsafe();
+
+  // We could return max/min but we don't really expose what the maximum delta
+  // is. Instead, return max/(-max), which is something that clients can reason
+  // about.
+  // TODO(rvargas) crbug.com/332611: don't use internal values.
+  int64_t limit = std::numeric_limits<int64_t>::max();
+  if (value.validity() == internal::RANGE_UNDERFLOW)
+    limit = -limit;
+  return value.ValueOrDefault(limit);
+}
+
+
+int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs) {
+  internal::CheckedNumeric<int64_t> rv(lhs);
+  rv += rhs;
+  return FromCheckedNumeric(rv);
+}
+
+
+int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs) {
+  internal::CheckedNumeric<int64_t> rv(lhs);
+  rv -= rhs;
+  return FromCheckedNumeric(rv);
+}
+
 }  // namespace bits
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/bits.h b/src/base/bits.h
index 0e76624..2e6527b 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -16,6 +16,12 @@
 
 namespace v8 {
 namespace base {
+
+namespace internal {
+template <typename T>
+class CheckedNumeric;
+}
+
 namespace bits {
 
 // CountPopulation32(value) returns the number of bits set in |value|.
@@ -296,6 +302,21 @@
   return rhs ? lhs % rhs : 0u;
 }
 
+
+// Clamp |value| on overflow and underflow conditions.
+int64_t FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
+
+
+// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|, clamps the
+// result on overflow or underflow, and returns it.
+int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
+
+
+// SignedSaturatedSub64(lhs, rhs) subtracts |rhs| from |lhs|, clamps the
+// result on overflow or underflow, and returns it.
+int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
+
+
 }  // namespace bits
 }  // namespace base
 }  // namespace v8
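
A sketch of the saturating semantics, assuming GCC/Clang's __builtin_add_overflow; like FromCheckedNumeric above, it clamps to max()/-max() rather than max()/min():

#include <cstdint>
#include <limits>

int64_t SaturatedAdd64(int64_t lhs, int64_t rhs) {
  int64_t result;
  if (!__builtin_add_overflow(lhs, rhs, &result)) return result;
  // Overflow happened in the direction of |rhs|: clamp to max() or -max().
  const int64_t limit = std::numeric_limits<int64_t>::max();
  return rhs < 0 ? -limit : limit;
}
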
diff --git a/src/base/compiler-specific.h b/src/base/compiler-specific.h
index ffd5a44..822893f 100644
--- a/src/base/compiler-specific.h
+++ b/src/base/compiler-specific.h
@@ -26,6 +26,17 @@
 #define WARN_UNUSED_RESULT /* NOT SUPPORTED */
 #endif
 
+// Tell the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+#if defined(__GNUC__)
+#define PRINTF_FORMAT(format_param, dots_param) \
+  __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
 
 // The C++ standard requires that static const members have an out-of-class
 // definition (in a single compilation unit), but MSVC chokes on this (when
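
A sketch of the attribute in use; Log here is a hypothetical function, not part of V8. With the attribute applied, GCC/Clang's -Wformat diagnoses mismatched arguments at each call site:

#include <cstdarg>
#include <cstdio>

#include "src/base/compiler-specific.h"  // for PRINTF_FORMAT

// Parameter 1 is the format string, the "..." is parameter 2.
void PRINTF_FORMAT(1, 2) Log(const char* format, ...) {
  va_list args;
  va_start(args, format);
  vprintf(format, args);
  va_end(args);
}

// Log("%d items", "many");  // with the attribute, -Wformat flags this
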
diff --git a/src/base/format-macros.h b/src/base/format-macros.h
new file mode 100644
index 0000000..5f5fe5d
--- /dev/null
+++ b/src/base/format-macros.h
@@ -0,0 +1,97 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FORMAT_MACROS_H_
+#define BASE_FORMAT_MACROS_H_
+
+// This file defines the format macros for some integer types.
+
+// To print a 64-bit value in a portable way:
+//   int64_t value;
+//   printf("xyz:%" PRId64, value);
+// The "d" in the macro corresponds to %d; you can also use PRIu64 etc.
+//
+// For wide strings, prepend "Wide" to the macro:
+//   int64_t value;
+//   StringPrintf(L"xyz: %" WidePRId64, value);
+//
+// To print a size_t value in a portable way:
+//   size_t size;
+//   printf("xyz: %" PRIuS, size);
+// The "u" in the macro corresponds to %u, and S is for "size".
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "src/base/build_config.h"
+
+#if defined(V8_OS_POSIX) && (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && \
+    !defined(PRId64)
+#error "inttypes.h has already been included before this header file, but "
+#error "without __STDC_FORMAT_MACROS defined."
+#endif
+
+#if defined(V8_OS_POSIX) && !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#if defined(V8_OS_POSIX)
+
+// GCC will concatenate wide and narrow strings correctly, so nothing needs to
+// be done here.
+#define WidePRId64 PRId64
+#define WidePRIu64 PRIu64
+#define WidePRIx64 PRIx64
+
+#if !defined(PRIuS)
+#define PRIuS "zu"
+#endif
+
+// The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
+// architectures and Apple does not provide standard format macros and
+// recommends casting. This has many drawbacks, so instead define macros
+// for formatting those types.
+#if defined(V8_OS_MACOSX)
+#if defined(V8_HOST_ARCH_64_BIT)
+#if !defined(PRIdNS)
+#define PRIdNS "ld"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "lu"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "lx"
+#endif
+#else  // defined(V8_HOST_ARCH_64_BIT)
+#if !defined(PRIdNS)
+#define PRIdNS "d"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "u"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "x"
+#endif
+#endif
+#endif  // defined(V8_OS_MACOSX)
+
+#else  // V8_OS_WIN
+
+#if !defined(PRId64) || !defined(PRIu64) || !defined(PRIx64)
+#error "inttypes.h provided by win toolchain should define these."
+#endif
+
+#define WidePRId64 L"I64d"
+#define WidePRIu64 L"I64u"
+#define WidePRIx64 L"I64x"
+
+#if !defined(PRIuS)
+#define PRIuS "Iu"
+#endif
+
+#endif
+
+#endif  // BASE_FORMAT_MACROS_H_
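
A usage sketch: PRId64 comes from <inttypes.h>, while PRIuS is defined by this header (redefined below so the snippet stands alone, mirroring the POSIX fallback above):

#include <cinttypes>
#include <cstdio>

#if !defined(PRIuS)
#define PRIuS "zu"  // POSIX fallback, as in the header above
#endif

int main() {
  int64_t value = INT64_C(1) << 40;
  size_t size = sizeof value;
  printf("value: %" PRId64 " size: %" PRIuS "\n", value, size);
  return 0;
}
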
diff --git a/src/base/logging.h b/src/base/logging.h
index 15322f6..45bc3c4 100644
--- a/src/base/logging.h
+++ b/src/base/logging.h
@@ -10,9 +10,10 @@
 #include <string>
 
 #include "src/base/build_config.h"
+#include "src/base/compiler-specific.h"
 
-extern "C" V8_NORETURN void V8_Fatal(const char* file, int line,
-                                     const char* format, ...);
+extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN
+    void V8_Fatal(const char* file, int line, const char* format, ...);
 
 extern "C" void V8_RuntimeError(const char* file, int line,
                                 const char* message);
diff --git a/src/base/macros.h b/src/base/macros.h
index 3f09b2b..382c30b 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -5,13 +5,8 @@
 #ifndef V8_BASE_MACROS_H_
 #define V8_BASE_MACROS_H_
 
-#include <stddef.h>
-#include <stdint.h>
-
-#include <cstring>
-
-#include "src/base/build_config.h"
 #include "src/base/compiler-specific.h"
+#include "src/base/format-macros.h"
 #include "src/base/logging.h"
 
 
@@ -274,23 +269,27 @@
 #define V8PRIdPTR V8_PTR_PREFIX "d"
 #define V8PRIuPTR V8_PTR_PREFIX "u"
 
+// ptrdiff_t is 't' according to the standard, but MSVC uses 'I'.
+#if V8_CC_MSVC
+#define V8PRIxPTRDIFF "Ix"
+#define V8PRIdPTRDIFF "Id"
+#define V8PRIuPTRDIFF "Iu"
+#else
+#define V8PRIxPTRDIFF "tx"
+#define V8PRIdPTRDIFF "td"
+#define V8PRIuPTRDIFF "tu"
+#endif
+
 // Fix for Mac OS X defining uintptr_t as "unsigned long":
 #if V8_OS_MACOSX
 #undef V8PRIxPTR
 #define V8PRIxPTR "lx"
+#undef V8PRIdPTR
+#define V8PRIdPTR "ld"
 #undef V8PRIuPTR
 #define V8PRIuPTR "lxu"
 #endif
 
-// GCC on S390 31-bit expands 'size_t' to 'long unsigned int'
-// instead of 'int', resulting in compilation errors with %d.
-// The printf format specifier needs to be %zd instead.
-#if V8_HOST_ARCH_S390 && !V8_HOST_ARCH_64_BIT
-#define V8_SIZET_PREFIX "z"
-#else
-#define V8_SIZET_PREFIX ""
-#endif
-
 // The following macro works on both 32 and 64-bit platforms.
 // Usage: instead of writing 0x1234567890123456
 //      write V8_2PART_UINT64_C(0x12345678,90123456);
diff --git a/src/base/platform/condition-variable.cc b/src/base/platform/condition-variable.cc
index fcd6cf7..19c33f8 100644
--- a/src/base/platform/condition-variable.cc
+++ b/src/base/platform/condition-variable.cc
@@ -36,6 +36,19 @@
 
 
 ConditionVariable::~ConditionVariable() {
+#if defined(V8_OS_MACOSX)
+  // This hack is necessary to avoid a fatal pthreads subsystem bug in the
+  // Darwin kernel. http://crbug.com/517681.
+  {
+    Mutex lock;
+    LockGuard<Mutex> l(&lock);
+    struct timespec ts;
+    ts.tv_sec = 0;
+    ts.tv_nsec = 1;
+    pthread_cond_timedwait_relative_np(&native_handle_, &lock.native_handle(),
+                                       &ts);
+  }
+#endif
   int result = pthread_cond_destroy(&native_handle_);
   DCHECK_EQ(0, result);
   USE(result);
diff --git a/src/base/platform/platform-macos.cc b/src/base/platform/platform-macos.cc
index 419281f..ecd440c 100644
--- a/src/base/platform/platform-macos.cc
+++ b/src/base/platform/platform-macos.cc
@@ -86,10 +86,10 @@
     char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
 #endif
     if (code_ptr == NULL) continue;
-    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
+    const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
     const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
-    result.push_back(
-        SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
+    result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
+                                          start + size, slide));
   }
   return result;
 }
diff --git a/src/base/platform/platform.h b/src/base/platform/platform.h
index 5b2dbc9..9464fb1 100644
--- a/src/base/platform/platform.h
+++ b/src/base/platform/platform.h
@@ -26,6 +26,7 @@
 #include <vector>
 
 #include "src/base/build_config.h"
+#include "src/base/compiler-specific.h"
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/semaphore.h"
 
@@ -154,18 +155,19 @@
   // Print output to console. This is mostly used for debugging output.
   // On platforms that have standard terminal output, the output
   // should go to stdout.
-  static void Print(const char* format, ...);
-  static void VPrint(const char* format, va_list args);
+  static PRINTF_FORMAT(1, 2) void Print(const char* format, ...);
+  static PRINTF_FORMAT(1, 0) void VPrint(const char* format, va_list args);
 
   // Print output to a file. This is mostly used for debugging output.
-  static void FPrint(FILE* out, const char* format, ...);
-  static void VFPrint(FILE* out, const char* format, va_list args);
+  static PRINTF_FORMAT(2, 3) void FPrint(FILE* out, const char* format, ...);
+  static PRINTF_FORMAT(2, 0) void VFPrint(FILE* out, const char* format,
+                                          va_list args);
 
   // Print error output to console. This is mostly used for error message
   // output. On platforms that have standard terminal output, the output
   // should go to stderr.
-  static void PrintError(const char* format, ...);
-  static void VPrintError(const char* format, va_list args);
+  static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
+  static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
 
   // Allocate/Free memory used by JS heap. Pages are readable/writable, but
   // they are not guaranteed to be executable unless 'executable' is true.
@@ -222,11 +224,10 @@
 
   // Safe formatting print. Ensures that str is always null-terminated.
   // Returns the number of chars written, or -1 if output was truncated.
-  static int SNPrintF(char* str, int length, const char* format, ...);
-  static int VSNPrintF(char* str,
-                       int length,
-                       const char* format,
-                       va_list args);
+  static PRINTF_FORMAT(3, 4) int SNPrintF(char* str, int length,
+                                          const char* format, ...);
+  static PRINTF_FORMAT(3, 0) int VSNPrintF(char* str, int length,
+                                           const char* format, va_list args);
 
   static char* StrChr(char* str, int c);
   static void StrNCpy(char* dest, int length, const char* src, size_t n);
@@ -234,13 +235,20 @@
   // Support for the profiler.  Can do nothing, in which case ticks
   // occurring in shared libraries will not be properly accounted for.
   struct SharedLibraryAddress {
-    SharedLibraryAddress(
-        const std::string& library_path, uintptr_t start, uintptr_t end)
-        : library_path(library_path), start(start), end(end) {}
+    SharedLibraryAddress(const std::string& library_path, uintptr_t start,
+                         uintptr_t end)
+        : library_path(library_path), start(start), end(end), aslr_slide(0) {}
+    SharedLibraryAddress(const std::string& library_path, uintptr_t start,
+                         uintptr_t end, intptr_t aslr_slide)
+        : library_path(library_path),
+          start(start),
+          end(end),
+          aslr_slide(aslr_slide) {}
 
     std::string library_path;
     uintptr_t start;
     uintptr_t end;
+    intptr_t aslr_slide;
   };
 
   static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
diff --git a/src/base/platform/semaphore.cc b/src/base/platform/semaphore.cc
index 284474e..a7e522a 100644
--- a/src/base/platform/semaphore.cc
+++ b/src/base/platform/semaphore.cc
@@ -34,7 +34,6 @@
   USE(result);
 }
 
-
 void Semaphore::Signal() {
   kern_return_t result = semaphore_signal(native_handle_);
   DCHECK_EQ(KERN_SUCCESS, result);
@@ -74,6 +73,19 @@
 #elif V8_OS_POSIX
 
 Semaphore::Semaphore(int count) {
+  // sem_init() does not check the alignment of the native handle.
+  // An unaligned native handle can later cause semaphore signaling to fail.
+  // Check the alignment here to catch such a failure earlier.
+  // Context: crbug.com/605349.
+#if V8_OS_AIX
+  // On AIX, sem_t is of type int.
+  const uintptr_t kSemaphoreAlignmentMask = sizeof(int) - 1;
+#else
+  const uintptr_t kSemaphoreAlignmentMask = sizeof(void*) - 1;
+#endif
+  CHECK_EQ(
+      0, reinterpret_cast<uintptr_t>(&native_handle_) &
+      kSemaphoreAlignmentMask);
   DCHECK(count >= 0);
 #if V8_LIBC_GLIBC
   // sem_init in glibc prior to 2.1 does not zero out semaphores.
@@ -91,7 +103,6 @@
   USE(result);
 }
 
-
 void Semaphore::Signal() {
   int result = sem_post(&native_handle_);
   CHECK_EQ(0, result);
@@ -162,7 +173,6 @@
   USE(result);
 }
 
-
 void Semaphore::Signal() {
   LONG dummy;
   BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
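
The CHECK added in the POSIX constructor relies on the standard power-of-two mask trick; in isolation:

#include <cstdint>

// An address is |alignment|-aligned (alignment a power of two) exactly
// when its low bits are zero, i.e. (addr & (alignment - 1)) == 0.
bool IsAligned(const void* ptr, uintptr_t alignment) {
  return (reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0;
}
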
diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc
index 6d5e538..b2355a3 100644
--- a/src/base/platform/time.cc
+++ b/src/base/platform/time.cc
@@ -10,7 +10,9 @@
 #include <unistd.h>
 #endif
 #if V8_OS_MACOSX
+#include <mach/mach.h>
 #include <mach/mach_time.h>
+#include <pthread.h>
 #endif
 
 #include <cstring>
@@ -25,6 +27,51 @@
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
 
+namespace {
+
+#if V8_OS_MACOSX
+int64_t ComputeThreadTicks() {
+  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
+  thread_basic_info_data_t thread_info_data;
+  kern_return_t kr = thread_info(
+      pthread_mach_thread_np(pthread_self()),
+      THREAD_BASIC_INFO,
+      reinterpret_cast<thread_info_t>(&thread_info_data),
+      &thread_info_count);
+  CHECK(kr == KERN_SUCCESS);
+
+  v8::base::CheckedNumeric<int64_t> absolute_micros(
+      thread_info_data.user_time.seconds);
+  absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
+  absolute_micros += thread_info_data.user_time.microseconds;
+  return absolute_micros.ValueOrDie();
+}
+#elif V8_OS_POSIX
+// Helper function to get results from clock_gettime() and convert them to a
+// microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC be
+// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
+// _POSIX_MONOTONIC_CLOCK to -1.
+inline int64_t ClockNow(clockid_t clk_id) {
+#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
+  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
+  struct timespec ts;
+  if (clock_gettime(clk_id, &ts) != 0) {
+    UNREACHABLE();
+    return 0;
+  }
+  v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
+  result *= v8::base::Time::kMicrosecondsPerSecond;
+  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
+  return result.ValueOrDie();
+#else  // Monotonic clock not supported.
+  return 0;
+#endif
+}
+#endif  // V8_OS_MACOSX
+
+
+}  // namespace
+
 namespace v8 {
 namespace base {
 
@@ -541,12 +588,7 @@
 #elif V8_OS_SOLARIS
   ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
 #elif V8_OS_POSIX
-  struct timespec ts;
-  int result = clock_gettime(CLOCK_MONOTONIC, &ts);
-  DCHECK_EQ(0, result);
-  USE(result);
-  ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
-           ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+  ticks = ClockNow(CLOCK_MONOTONIC);
 #endif  // V8_OS_MACOSX
   // Make sure we never return 0 here.
   return TimeTicks(ticks + 1);
@@ -560,5 +602,30 @@
 
 #endif  // V8_OS_WIN
 
+
+// TODO(lpy): For the Windows ThreadTicks implementation,
+// see http://crbug.com/v8/5000
+bool ThreadTicks::IsSupported() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+    defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID)
+  return true;
+#else
+  return false;
+#endif
+}
+
+
+ThreadTicks ThreadTicks::Now() {
+#if V8_OS_MACOSX
+  return ThreadTicks(ComputeThreadTicks());
+#elif (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+  defined(V8_OS_ANDROID)
+  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#else
+  UNREACHABLE();
+  return ThreadTicks();
+#endif
+}
+
 }  // namespace base
 }  // namespace v8
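
For reference, a minimal POSIX sketch of the CLOCK_THREAD_CPUTIME_ID path taken by ThreadTicks::Now() on Linux/Android; availability is platform-dependent, which is what the IsSupported() guard is for:

#include <cstdint>
#include <ctime>

// This thread's CPU time in microseconds, or 0 if the clock is unavailable.
int64_t ThreadCpuTimeMicros() {
  struct timespec ts;
  if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) != 0) return 0;
  return static_cast<int64_t>(ts.tv_sec) * 1000000 + ts.tv_nsec / 1000;
}
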
diff --git a/src/base/platform/time.h b/src/base/platform/time.h
index c8140ef..e17fc1d 100644
--- a/src/base/platform/time.h
+++ b/src/base/platform/time.h
@@ -9,7 +9,9 @@
 #include <iosfwd>
 #include <limits>
 
+#include "src/base/bits.h"
 #include "src/base/macros.h"
+#include "src/base/safe_math.h"
 
 // Forward declarations.
 extern "C" {
@@ -23,8 +25,14 @@
 namespace base {
 
 class Time;
+class TimeDelta;
 class TimeTicks;
 
+namespace time_internal {
+template<class TimeClass>
+class TimeBase;
+}
+
 // -----------------------------------------------------------------------------
 // TimeDelta
 //
@@ -143,6 +151,7 @@
   }
 
  private:
+  template<class TimeClass> friend class time_internal::TimeBase;
   // Constructs a delta given the duration in microseconds. This is private
   // to avoid confusion by callers with an integer constructor. Use
   // FromSeconds, FromMilliseconds, etc. instead.
@@ -153,34 +162,122 @@
 };
 
 
+namespace time_internal {
+
+// TimeBase--------------------------------------------------------------------
+
+// Provides value storage and comparison/math operations common to all time
+// classes. Each subclass provides for strong type-checking to ensure
+// semantically meaningful comparison/math of time values from the same clock
+// source or timeline.
+template<class TimeClass>
+class TimeBase {
+ public:
+  static const int64_t kHoursPerDay = 24;
+  static const int64_t kMillisecondsPerSecond = 1000;
+  static const int64_t kMillisecondsPerDay =
+      kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
+  static const int64_t kMicrosecondsPerMillisecond = 1000;
+  static const int64_t kMicrosecondsPerSecond =
+      kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
+  static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+  static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+  static const int64_t kMicrosecondsPerDay =
+      kMicrosecondsPerHour * kHoursPerDay;
+  static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+  static const int64_t kNanosecondsPerMicrosecond = 1000;
+  static const int64_t kNanosecondsPerSecond =
+      kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+
+  // Returns true if this object has not been initialized.
+  //
+  // Warning: Be careful when writing code that performs math on time values,
+  // since it's possible to produce a valid "zero" result that should not be
+  // interpreted as a "null" value.
+  bool IsNull() const {
+    return us_ == 0;
+  }
+
+  // Returns true if this object represents the maximum time.
+  bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+  // For serializing only. Use FromInternalValue() to reconstitute. Please don't
+  // use this and do arithmetic on it, as it is more error prone than using the
+  // provided operators.
+  int64_t ToInternalValue() const { return us_; }
+
+  TimeClass& operator=(TimeClass other) {
+    us_ = other.us_;
+    return *(static_cast<TimeClass*>(this));
+  }
+
+  // Compute the difference between two times.
+  TimeDelta operator-(TimeClass other) const {
+    return TimeDelta::FromMicroseconds(us_ - other.us_);
+  }
+
+  // Return a new time modified by some delta.
+  TimeClass operator+(TimeDelta delta) const {
+    return TimeClass(bits::SignedSaturatedAdd64(delta.delta_, us_));
+  }
+  TimeClass operator-(TimeDelta delta) const {
+    return TimeClass(-bits::SignedSaturatedSub64(delta.delta_, us_));
+  }
+
+  // Modify by some time delta.
+  TimeClass& operator+=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this + delta));
+  }
+  TimeClass& operator-=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this - delta));
+  }
+
+  // Comparison operators
+  bool operator==(TimeClass other) const {
+    return us_ == other.us_;
+  }
+  bool operator!=(TimeClass other) const {
+    return us_ != other.us_;
+  }
+  bool operator<(TimeClass other) const {
+    return us_ < other.us_;
+  }
+  bool operator<=(TimeClass other) const {
+    return us_ <= other.us_;
+  }
+  bool operator>(TimeClass other) const {
+    return us_ > other.us_;
+  }
+  bool operator>=(TimeClass other) const {
+    return us_ >= other.us_;
+  }
+
+  // Converts an integer value representing TimeClass to a class. This is used
+  // when deserializing a |TimeClass| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  static TimeClass FromInternalValue(int64_t us) { return TimeClass(us); }
+
+ protected:
+  explicit TimeBase(int64_t us) : us_(us) {}
+
+  // Time value in a microsecond timebase.
+  int64_t us_;
+};
+
+}  // namespace time_internal
+
+
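+
+A stripped-down sketch of the CRTP pattern TimeBase uses: operators take and return the derived type, so values from different clocks can never be mixed. Names here are illustrative, not V8's:
+
+#include <cstdint>
+
+template <class TimeClass>
+class TimeBaseSketch {
+ public:
+  // Operators are defined once here but typed as the derived class, so
+  // e.g. a wall-clock value cannot be compared against a tick value.
+  bool operator<(TimeClass other) const { return us_ < other.us_; }
+  TimeClass operator+(int64_t delta_us) const {
+    return TimeClass(us_ + delta_us);
+  }
+
+ protected:
+  explicit TimeBaseSketch(int64_t us) : us_(us) {}
+  int64_t us_;  // microseconds
+};
+
+class MonotonicTicks : public TimeBaseSketch<MonotonicTicks> {
+ public:
+  MonotonicTicks() : TimeBaseSketch(0) {}
+
+ private:
+  friend class TimeBaseSketch<MonotonicTicks>;  // for the private constructor
+  explicit MonotonicTicks(int64_t us) : TimeBaseSketch(us) {}
+};
+
+// MonotonicTicks start; auto later = start + 16;  // stays MonotonicTicks
+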
 // -----------------------------------------------------------------------------
 // Time
 //
 // This class represents an absolute point in time, internally represented as
 // microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
 
-class Time final {
+class Time final : public time_internal::TimeBase<Time> {
  public:
-  static const int64_t kMillisecondsPerSecond = 1000;
-  static const int64_t kMicrosecondsPerMillisecond = 1000;
-  static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
-                                                kMillisecondsPerSecond;
-  static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
-  static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
-  static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
-  static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
-  static const int64_t kNanosecondsPerMicrosecond = 1000;
-  static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
-                                               kMicrosecondsPerSecond;
-
   // Contains the NULL time. Use Time::Now() to get the current time.
-  Time() : us_(0) {}
-
-  // Returns true if the time object has not been initialized.
-  bool IsNull() const { return us_ == 0; }
-
-  // Returns true if the time object is the maximum time.
-  bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+  Time() : TimeBase(0) {}
 
   // Returns the current time. Watch out, the system might adjust its clock
   // in which case time will actually go backwards. We don't guarantee that
@@ -200,15 +297,6 @@
   // with which we might compare it.
   static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
 
-  // Converts to/from internal values. The meaning of the "internal value" is
-  // completely up to the implementation, so it should be treated as opaque.
-  static Time FromInternalValue(int64_t value) {
-    return Time(value);
-  }
-  int64_t ToInternalValue() const {
-    return us_;
-  }
-
   // Converts to/from POSIX time specs.
   static Time FromTimespec(struct timespec ts);
   struct timespec ToTimespec() const;
@@ -226,59 +314,9 @@
   static Time FromJsTime(double ms_since_epoch);
   double ToJsTime() const;
 
-  Time& operator=(const Time& other) {
-    us_ = other.us_;
-    return *this;
-  }
-
-  // Compute the difference between two times.
-  TimeDelta operator-(const Time& other) const {
-    return TimeDelta::FromMicroseconds(us_ - other.us_);
-  }
-
-  // Modify by some time delta.
-  Time& operator+=(const TimeDelta& delta) {
-    us_ += delta.InMicroseconds();
-    return *this;
-  }
-  Time& operator-=(const TimeDelta& delta) {
-    us_ -= delta.InMicroseconds();
-    return *this;
-  }
-
-  // Return a new time modified by some delta.
-  Time operator+(const TimeDelta& delta) const {
-    return Time(us_ + delta.InMicroseconds());
-  }
-  Time operator-(const TimeDelta& delta) const {
-    return Time(us_ - delta.InMicroseconds());
-  }
-
-  // Comparison operators
-  bool operator==(const Time& other) const {
-    return us_ == other.us_;
-  }
-  bool operator!=(const Time& other) const {
-    return us_ != other.us_;
-  }
-  bool operator<(const Time& other) const {
-    return us_ < other.us_;
-  }
-  bool operator<=(const Time& other) const {
-    return us_ <= other.us_;
-  }
-  bool operator>(const Time& other) const {
-    return us_ > other.us_;
-  }
-  bool operator>=(const Time& other) const {
-    return us_ >= other.us_;
-  }
-
  private:
-  explicit Time(int64_t us) : us_(us) {}
-
-  // Time in microseconds in UTC.
-  int64_t us_;
+  friend class time_internal::TimeBase<Time>;
+  explicit Time(int64_t us) : TimeBase(us) {}
 };
 
 std::ostream& operator<<(std::ostream&, const Time&);
@@ -298,9 +336,9 @@
 // Time::Now() may actually decrease or jump).  But note that TimeTicks may
 // "stand still", for example if the computer suspended.
 
-class TimeTicks final {
+class TimeTicks final : public time_internal::TimeBase<TimeTicks> {
  public:
-  TimeTicks() : ticks_(0) {}
+  TimeTicks() : TimeBase(0) {}
 
   // Platform-dependent tick count representing "right now."
   // The resolution of this clock is ~1-15ms.  Resolution varies depending
@@ -318,79 +356,43 @@
   // Returns true if the high-resolution clock is working on this system.
   static bool IsHighResolutionClockWorking();
 
-  // Returns true if this object has not been initialized.
-  bool IsNull() const { return ticks_ == 0; }
-
-  // Converts to/from internal values. The meaning of the "internal value" is
-  // completely up to the implementation, so it should be treated as opaque.
-  static TimeTicks FromInternalValue(int64_t value) {
-    return TimeTicks(value);
-  }
-  int64_t ToInternalValue() const {
-    return ticks_;
-  }
-
-  TimeTicks& operator=(const TimeTicks other) {
-    ticks_ = other.ticks_;
-    return *this;
-  }
-
-  // Compute the difference between two times.
-  TimeDelta operator-(const TimeTicks other) const {
-    return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
-  }
-
-  // Modify by some time delta.
-  TimeTicks& operator+=(const TimeDelta& delta) {
-    ticks_ += delta.InMicroseconds();
-    return *this;
-  }
-  TimeTicks& operator-=(const TimeDelta& delta) {
-    ticks_ -= delta.InMicroseconds();
-    return *this;
-  }
-
-  // Return a new TimeTicks modified by some delta.
-  TimeTicks operator+(const TimeDelta& delta) const {
-    return TimeTicks(ticks_ + delta.InMicroseconds());
-  }
-  TimeTicks operator-(const TimeDelta& delta) const {
-    return TimeTicks(ticks_ - delta.InMicroseconds());
-  }
-
-  // Comparison operators
-  bool operator==(const TimeTicks& other) const {
-    return ticks_ == other.ticks_;
-  }
-  bool operator!=(const TimeTicks& other) const {
-    return ticks_ != other.ticks_;
-  }
-  bool operator<(const TimeTicks& other) const {
-    return ticks_ < other.ticks_;
-  }
-  bool operator<=(const TimeTicks& other) const {
-    return ticks_ <= other.ticks_;
-  }
-  bool operator>(const TimeTicks& other) const {
-    return ticks_ > other.ticks_;
-  }
-  bool operator>=(const TimeTicks& other) const {
-    return ticks_ >= other.ticks_;
-  }
-
  private:
-  // Please use Now() to create a new object. This is for internal use
-  // and testing. Ticks is in microseconds.
-  explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+  friend class time_internal::TimeBase<TimeTicks>;
 
-  // Tick count in microseconds.
-  int64_t ticks_;
+  // Please use Now() to create a new object. This is for internal use
+  // and testing. Ticks are in microseconds.
+  explicit TimeTicks(int64_t ticks) : TimeBase(ticks) {}
 };
 
 inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
   return ticks + delta;
 }
 
+
+// ThreadTicks ----------------------------------------------------------------
+
+// Represents a clock, specific to a particular thread, that runs only while the
+// thread is running.
+class ThreadTicks final : public time_internal::TimeBase<ThreadTicks> {
+ public:
+  ThreadTicks() : TimeBase(0) {}
+
+  // Returns true if ThreadTicks::Now() is supported on this system.
+  static bool IsSupported();
+
+  // Returns thread-specific CPU-time on systems that support this feature.
+  // Needs to be guarded with a call to IsSupported(). Use this timer
+  // to (approximately) measure how much time the calling thread spent doing
+  // actual work vs. being de-scheduled. May return bogus results if the thread
+  // migrates to another CPU between two calls. Returns an empty ThreadTicks
+  // object until the initialization is completed.
+  static ThreadTicks Now();
+
+ private:
+  // This is for internal use and testing. Ticks are in microseconds.
+  explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
+};
+
 }  // namespace base
 }  // namespace v8
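
For illustration only (not part of this patch): a minimal sketch of how the
consolidated TimeBase-derived types above might be used together. DoWork is a
hypothetical workload, and the include path is assumed from V8's layout.

  #include "src/base/platform/time.h"

  void DoWork();  // hypothetical workload

  void MeasureWork() {
    using v8::base::ThreadTicks;
    using v8::base::TimeDelta;
    using v8::base::TimeTicks;
    TimeTicks wall_start = TimeTicks::Now();
    // Per the comment on ThreadTicks::Now(), guard it with IsSupported().
    ThreadTicks cpu_start =
        ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
    DoWork();
    TimeDelta wall_time = TimeTicks::Now() - wall_start;
    if (!cpu_start.IsNull()) {
      TimeDelta cpu_time = ThreadTicks::Now() - cpu_start;
      // wall_time - cpu_time approximates how long the thread was
      // de-scheduled while DoWork ran.
    }
  }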
 
diff --git a/src/bignum.cc b/src/bignum.cc
index e7c6747..e2a9c4e 100644
--- a/src/bignum.cc
+++ b/src/bignum.cc
@@ -539,13 +539,6 @@
 }
 
 
-static char HexCharOfValue(int value) {
-  DCHECK(0 <= value && value <= 16);
-  if (value < 10) return value + '0';
-  return value - 10 + 'A';
-}
-
-
 bool Bignum::ToHexString(char* buffer, int buffer_size) const {
   DCHECK(IsClamped());
   // Each bigit must be printable as separate hex-character.
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index f67065d..70eec2b 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -10,6 +10,7 @@
 #include "src/extensions/externalize-string-extension.h"
 #include "src/extensions/free-buffer-extension.h"
 #include "src/extensions/gc-extension.h"
+#include "src/extensions/ignition-statistics-extension.h"
 #include "src/extensions/statistics-extension.h"
 #include "src/extensions/trigger-failure-extension.h"
 #include "src/heap/heap.h"
@@ -73,7 +74,7 @@
 v8::Extension* Bootstrapper::externalize_string_extension_ = NULL;
 v8::Extension* Bootstrapper::statistics_extension_ = NULL;
 v8::Extension* Bootstrapper::trigger_failure_extension_ = NULL;
-
+v8::Extension* Bootstrapper::ignition_statistics_extension_ = NULL;
 
 void Bootstrapper::InitializeOncePerProcess() {
   free_buffer_extension_ = new FreeBufferExtension;
@@ -86,6 +87,8 @@
   v8::RegisterExtension(statistics_extension_);
   trigger_failure_extension_ = new TriggerFailureExtension;
   v8::RegisterExtension(trigger_failure_extension_);
+  ignition_statistics_extension_ = new IgnitionStatisticsExtension;
+  v8::RegisterExtension(ignition_statistics_extension_);
 }
 
 
@@ -100,6 +103,8 @@
   statistics_extension_ = NULL;
   delete trigger_failure_extension_;
   trigger_failure_extension_ = NULL;
+  delete ignition_statistics_extension_;
+  ignition_statistics_extension_ = NULL;
 }
 
 
@@ -157,7 +162,8 @@
   Handle<JSFunction> GetThrowTypeErrorIntrinsic(Builtins::Name builtin_name);
 
   void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
-  void CreateIteratorMaps();
+  void CreateIteratorMaps(Handle<JSFunction> empty);
+  void CreateAsyncFunctionMaps(Handle<JSFunction> empty);
   void CreateJSProxyMaps();
 
   // Make the "arguments" and "caller" properties throw a TypeError on access.
@@ -200,6 +206,7 @@
   HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
   HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
   DECLARE_FEATURE_INITIALIZATION(promise_extra, "")
+  DECLARE_FEATURE_INITIALIZATION(intl_extra, "")
 #undef DECLARE_FEATURE_INITIALIZATION
 
   Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
@@ -354,7 +361,6 @@
   }
 }
 
-
 namespace {
 
 void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
@@ -367,20 +373,18 @@
   function->shared()->set_native(true);
 }
 
-
-static void InstallFunction(Handle<JSObject> target,
-                            Handle<JSFunction> function, Handle<Name> name,
-                            PropertyAttributes attributes = DONT_ENUM) {
+void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
+                     Handle<Name> name,
+                     PropertyAttributes attributes = DONT_ENUM) {
   Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
   InstallFunction(target, name, function, name_string, attributes);
 }
 
-
-static Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
-                                         InstanceType type, int instance_size,
-                                         MaybeHandle<JSObject> maybe_prototype,
-                                         Builtins::Name call,
-                                         bool strict_function_map = false) {
+Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
+                                  InstanceType type, int instance_size,
+                                  MaybeHandle<JSObject> maybe_prototype,
+                                  Builtins::Name call,
+                                  bool strict_function_map = false) {
   Factory* factory = isolate->factory();
   Handle<Code> call_code(isolate->builtins()->builtin(call));
   Handle<JSObject> prototype;
@@ -394,7 +398,6 @@
                                                     strict_function_map);
 }
 
-
 Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
                                    InstanceType type, int instance_size,
                                    MaybeHandle<JSObject> maybe_prototype,
@@ -409,7 +412,6 @@
   return function;
 }
 
-
 Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
                                    InstanceType type, int instance_size,
                                    MaybeHandle<JSObject> maybe_prototype,
@@ -422,8 +424,40 @@
                          strict_function_map);
 }
 
-}  // namespace
+Handle<JSFunction> SimpleCreateFunction(Isolate* isolate, Handle<String> name,
+                                        Builtins::Name call, int len,
+                                        bool adapt) {
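+  // When 'adapt' is true the function advertises a formal parameter count of
+  // 'len', so mismatched call sites go through the arguments adaptor; when
+  // false it opts out via DontAdaptArguments and must cope with any actual
+  // argument count itself.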
+  Handle<JSFunction> fun =
+      CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
+                     MaybeHandle<JSObject>(), call);
+  if (adapt) {
+    fun->shared()->set_internal_formal_parameter_count(len);
+  } else {
+    fun->shared()->DontAdaptArguments();
+  }
+  fun->shared()->set_length(len);
+  return fun;
+}
 
+Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+                                         Handle<String> name,
+                                         Builtins::Name call, int len,
+                                         bool adapt) {
+  Handle<JSFunction> fun =
+      SimpleCreateFunction(base->GetIsolate(), name, call, len, adapt);
+  InstallFunction(base, fun, name, DONT_ENUM);
+  return fun;
+}
+
+Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+                                         const char* name, Builtins::Name call,
+                                         int len, bool adapt) {
+  Factory* const factory = base->GetIsolate()->factory();
+  return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
+                               len, adapt);
+}
+
+}  // namespace
 
 void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
                                             FunctionMode function_mode) {
@@ -709,22 +743,43 @@
       CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
 }
 
-
-void Genesis::CreateIteratorMaps() {
+void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
   // Create iterator-related meta-objects.
   Handle<JSObject> iterator_prototype =
       factory()->NewJSObject(isolate()->object_function(), TENURED);
   Handle<JSObject> generator_object_prototype =
       factory()->NewJSObject(isolate()->object_function(), TENURED);
+  native_context()->set_initial_generator_prototype(
+      *generator_object_prototype);
+  SetObjectPrototype(generator_object_prototype, iterator_prototype);
   Handle<JSObject> generator_function_prototype =
       factory()->NewJSObject(isolate()->object_function(), TENURED);
-  SetObjectPrototype(generator_object_prototype, iterator_prototype);
+  SetObjectPrototype(generator_function_prototype, empty);
 
+  JSObject::AddProperty(
+      generator_function_prototype, factory()->to_string_tag_symbol(),
+      factory()->NewStringFromAsciiChecked("GeneratorFunction"),
+      static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
   JSObject::AddProperty(generator_function_prototype,
-                        factory()->InternalizeUtf8String("prototype"),
+                        factory()->prototype_string(),
                         generator_object_prototype,
                         static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
 
+  JSObject::AddProperty(generator_object_prototype,
+                        factory()->constructor_string(),
+                        generator_function_prototype,
+                        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+  JSObject::AddProperty(generator_object_prototype,
+                        factory()->to_string_tag_symbol(),
+                        factory()->NewStringFromAsciiChecked("Generator"),
+                        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+  SimpleInstallFunction(generator_object_prototype, "next",
+                        Builtins::kGeneratorPrototypeNext, 1, true);
+  SimpleInstallFunction(generator_object_prototype, "return",
+                        Builtins::kGeneratorPrototypeReturn, 1, true);
+  SimpleInstallFunction(generator_object_prototype, "throw",
+                        Builtins::kGeneratorPrototypeThrow, 1, true);
+
   // Create maps for generator functions and their prototypes.  Store those
   // maps in the native context. The "prototype" property descriptor is
   // writable, non-enumerable, and non-configurable (as per ES6 draft
@@ -754,6 +809,32 @@
       *generator_object_prototype_map);
 }
 
+void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
+  // %AsyncFunctionPrototype% intrinsic
+  Handle<JSObject> async_function_prototype =
+      factory()->NewJSObject(isolate()->object_function(), TENURED);
+  SetObjectPrototype(async_function_prototype, empty);
+
+  JSObject::AddProperty(async_function_prototype,
+                        factory()->to_string_tag_symbol(),
+                        factory()->NewStringFromAsciiChecked("AsyncFunction"),
+                        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+  Handle<Map> strict_function_map(
+      native_context()->strict_function_without_prototype_map());
+  Handle<Map> sloppy_async_function_map =
+      Map::Copy(strict_function_map, "SloppyAsyncFunction");
+  sloppy_async_function_map->set_is_constructor(false);
+  Map::SetPrototype(sloppy_async_function_map, async_function_prototype);
+  native_context()->set_sloppy_async_function_map(*sloppy_async_function_map);
+
+  Handle<Map> strict_async_function_map =
+      Map::Copy(strict_function_map, "StrictAsyncFunction");
+  strict_async_function_map->set_is_constructor(false);
+  Map::SetPrototype(strict_async_function_map, async_function_prototype);
+  native_context()->set_strict_async_function_map(*strict_async_function_map);
+}
+
 void Genesis::CreateJSProxyMaps() {
   // Allocate the different maps for all Proxy types.
   // Next to the default proxy, we need maps indicating callable and
@@ -807,11 +888,11 @@
   Heap* heap = context->GetIsolate()->heap();
 #ifdef DEBUG
   { // NOLINT
-    DCHECK(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
+    DCHECK(context->next_context_link()->IsUndefined());
     // Check that context is not in the list yet.
     for (Object* current = heap->native_contexts_list();
          !current->IsUndefined();
-         current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
+         current = Context::cast(current)->next_context_link()) {
       DCHECK(current != context);
     }
   }
@@ -971,45 +1052,6 @@
   TransferIndexedProperties(global_object_from_snapshot, global_object);
 }
 
-
-static Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
-                                               Handle<String> name,
-                                               Builtins::Name call, int len,
-                                               bool adapt) {
-  Handle<JSFunction> fun =
-      CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
-                     MaybeHandle<JSObject>(), call);
-  if (adapt) {
-    fun->shared()->set_internal_formal_parameter_count(len);
-  } else {
-    fun->shared()->DontAdaptArguments();
-  }
-  fun->shared()->set_length(len);
-  return fun;
-}
-
-
-static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
-                                                Handle<String> name,
-                                                Builtins::Name call, int len,
-                                                bool adapt) {
-  Handle<JSFunction> fun =
-      SimpleCreateFunction(base->GetIsolate(), name, call, len, adapt);
-  InstallFunction(base, fun, name, DONT_ENUM);
-  return fun;
-}
-
-
-static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
-                                                const char* name,
-                                                Builtins::Name call, int len,
-                                                bool adapt) {
-  Factory* const factory = base->GetIsolate()->factory();
-  return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
-                               len, adapt);
-}
-
-
 static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
                                              Handle<JSFunction> function,
                                              int context_index) {
@@ -1049,40 +1091,74 @@
     Handle<JSFunction> object_function = isolate->object_function();
     JSObject::AddProperty(global_object, object_name, object_function,
                           DONT_ENUM);
+
     SimpleInstallFunction(object_function, factory->assign_string(),
                           Builtins::kObjectAssign, 2, false);
     SimpleInstallFunction(object_function, factory->create_string(),
                           Builtins::kObjectCreate, 2, false);
-    Handle<JSFunction> object_freeze = SimpleInstallFunction(
-        object_function, "freeze", Builtins::kObjectFreeze, 1, false);
-    native_context()->set_object_freeze(*object_freeze);
     SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
                           Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
     SimpleInstallFunction(object_function, "getOwnPropertyNames",
                           Builtins::kObjectGetOwnPropertyNames, 1, false);
     SimpleInstallFunction(object_function, "getOwnPropertySymbols",
                           Builtins::kObjectGetOwnPropertySymbols, 1, false);
-    SimpleInstallFunction(object_function, "is", Builtins::kObjectIs, 2, true);
-    Handle<JSFunction> object_is_extensible =
-        SimpleInstallFunction(object_function, "isExtensible",
-                              Builtins::kObjectIsExtensible, 1, false);
+    SimpleInstallFunction(object_function, "is",
+                          Builtins::kObjectIs, 2, true);
+    SimpleInstallFunction(object_function, "preventExtensions",
+                          Builtins::kObjectPreventExtensions, 1, false);
+    SimpleInstallFunction(object_function, "seal",
+                          Builtins::kObjectSeal, 1, false);
+
+    Handle<JSFunction> object_define_properties = SimpleInstallFunction(
+        object_function, "defineProperties",
+        Builtins::kObjectDefineProperties, 2, true);
+    native_context()->set_object_define_properties(*object_define_properties);
+
+    Handle<JSFunction> object_define_property = SimpleInstallFunction(
+        object_function, factory->defineProperty_string(),
+        Builtins::kObjectDefineProperty, 3, true);
+    native_context()->set_object_define_property(*object_define_property);
+
+    Handle<JSFunction> object_freeze = SimpleInstallFunction(
+        object_function, "freeze", Builtins::kObjectFreeze, 1, false);
+    native_context()->set_object_freeze(*object_freeze);
+
+    Handle<JSFunction> object_get_prototype_of = SimpleInstallFunction(
+        object_function, "getPrototypeOf", Builtins::kObjectGetPrototypeOf,
+        1, false);
+    native_context()->set_object_get_prototype_of(*object_get_prototype_of);
+
+    Handle<JSFunction> object_is_extensible = SimpleInstallFunction(
+        object_function, "isExtensible", Builtins::kObjectIsExtensible,
+        1, false);
     native_context()->set_object_is_extensible(*object_is_extensible);
+
     Handle<JSFunction> object_is_frozen = SimpleInstallFunction(
         object_function, "isFrozen", Builtins::kObjectIsFrozen, 1, false);
     native_context()->set_object_is_frozen(*object_is_frozen);
+
     Handle<JSFunction> object_is_sealed = SimpleInstallFunction(
         object_function, "isSealed", Builtins::kObjectIsSealed, 1, false);
     native_context()->set_object_is_sealed(*object_is_sealed);
+
     Handle<JSFunction> object_keys = SimpleInstallFunction(
         object_function, "keys", Builtins::kObjectKeys, 1, false);
     native_context()->set_object_keys(*object_keys);
-    SimpleInstallFunction(object_function, "preventExtensions",
-                          Builtins::kObjectPreventExtensions, 1, false);
-    SimpleInstallFunction(object_function, "seal", Builtins::kObjectSeal, 1,
-                          false);
 
+    SimpleInstallFunction(isolate->initial_object_prototype(),
+                          "__defineGetter__", Builtins::kObjectDefineGetter, 2,
+                          true);
+    SimpleInstallFunction(isolate->initial_object_prototype(),
+                          "__defineSetter__", Builtins::kObjectDefineSetter, 2,
+                          true);
     SimpleInstallFunction(isolate->initial_object_prototype(), "hasOwnProperty",
                           Builtins::kObjectHasOwnProperty, 1, true);
+    SimpleInstallFunction(isolate->initial_object_prototype(),
+                          "__lookupGetter__", Builtins::kObjectLookupGetter, 1,
+                          true);
+    SimpleInstallFunction(isolate->initial_object_prototype(),
+                          "__lookupSetter__", Builtins::kObjectLookupSetter, 1,
+                          true);
   }
 
   Handle<JSObject> global(native_context()->global_object());
@@ -1115,7 +1191,7 @@
     Handle<JSFunction> has_instance = InstallFunction(
         prototype, factory->has_instance_symbol(), JS_OBJECT_TYPE,
         JSObject::kHeaderSize, MaybeHandle<JSObject>(),
-        Builtins::kFunctionHasInstance,
+        Builtins::kFunctionPrototypeHasInstance,
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
 
     // Set the expected parameters for @@hasInstance to 1; required by builtin.
@@ -1124,9 +1200,6 @@
     // Set the length for the function to satisfy ECMA-262.
     has_instance->shared()->set_length(1);
 
-    // Install in the native context
-    native_context()->set_ordinary_has_instance(*has_instance);
-
     // Install the "constructor" property on the %FunctionPrototype%.
     JSObject::AddProperty(prototype, factory->constructor_string(),
                           function_fun, DONT_ENUM);
@@ -1252,6 +1325,22 @@
     // Install the String.fromCharCode function.
     SimpleInstallFunction(string_fun, "fromCharCode",
                           Builtins::kStringFromCharCode, 1, false);
+
+    // Create the %StringPrototype%
+    Handle<JSValue> prototype =
+        Handle<JSValue>::cast(factory->NewJSObject(string_fun, TENURED));
+    prototype->set_value(isolate->heap()->empty_string());
+    Accessors::FunctionSetPrototype(string_fun, prototype).Assert();
+
+    // Install the "constructor" property on the {prototype}.
+    JSObject::AddProperty(prototype, factory->constructor_string(), string_fun,
+                          DONT_ENUM);
+
+    // Install the String.prototype methods.
+    SimpleInstallFunction(prototype, "charAt", Builtins::kStringPrototypeCharAt,
+                          1, true);
+    SimpleInstallFunction(prototype, "charCodeAt",
+                          Builtins::kStringPrototypeCharCodeAt, 1, true);
   }
 
   {
@@ -1702,18 +1791,21 @@
         static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
     Map::EnsureDescriptorSlack(map, 2);
 
+    Handle<AccessorInfo> bound_length =
+        Accessors::BoundFunctionLengthInfo(isolate, roc_attribs);
     {  // length
-      DataDescriptor d(factory->length_string(), JSBoundFunction::kLengthIndex,
-                       roc_attribs, Representation::Tagged());
+      AccessorConstantDescriptor d(factory->length_string(), bound_length,
+                                   roc_attribs);
       map->AppendDescriptor(&d);
     }
-    {  // name
-      DataDescriptor d(factory->name_string(), JSBoundFunction::kNameIndex,
-                       roc_attribs, Representation::Tagged());
+    Handle<AccessorInfo> bound_name =
+        Accessors::BoundFunctionNameInfo(isolate, roc_attribs);
+    {  // name
+      AccessorConstantDescriptor d(factory->name_string(), bound_name,
+                                   roc_attribs);
       map->AppendDescriptor(&d);
     }
-
-    map->SetInObjectProperties(2);
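+    // length/name are now provided by AccessorInfo getters rather than
+    // stored as in-object fields, so the map reserves no in-object slots.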
+    map->SetInObjectProperties(0);
     native_context()->set_bound_function_without_constructor_map(*map);
 
     map = Map::Copy(map, "IsConstructor");
@@ -1887,6 +1979,7 @@
   HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
   HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
   FEATURE_INITIALIZE_GLOBAL(promise_extra, "")
+  FEATURE_INITIALIZE_GLOBAL(intl_extra, "")
 #undef FEATURE_INITIALIZE_GLOBAL
 }
 
@@ -2169,6 +2262,13 @@
         isolate, generator_function_function,
         Context::GENERATOR_FUNCTION_FUNCTION_INDEX);
 
+    SetObjectPrototype(generator_function_function,
+                       isolate->function_function());
+    JSObject::AddProperty(
+        generator_function_prototype, factory->constructor_string(),
+        generator_function_function,
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
     native_context->sloppy_generator_function_map()->SetConstructor(
         *generator_function_function);
     native_context->strict_generator_function_map()->SetConstructor(
@@ -2341,6 +2441,42 @@
           script_is_embedder_debug_script, attribs);
       script_map->AppendDescriptor(&d);
     }
+
+    {
+      PrototypeIterator iter(native_context->sloppy_async_function_map());
+      Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
+
+      static const bool kUseStrictFunctionMap = true;
+      Handle<JSFunction> async_function_constructor = InstallFunction(
+          container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
+          async_function_prototype, Builtins::kAsyncFunctionConstructor,
+          kUseStrictFunctionMap);
+      async_function_constructor->set_prototype_or_initial_map(
+          native_context->sloppy_async_function_map());
+      async_function_constructor->shared()->DontAdaptArguments();
+      async_function_constructor->shared()->set_construct_stub(
+          *isolate->builtins()->AsyncFunctionConstructor());
+      async_function_constructor->shared()->set_length(1);
+      InstallWithIntrinsicDefaultProto(isolate, async_function_constructor,
+                                       Context::ASYNC_FUNCTION_FUNCTION_INDEX);
+
+      JSObject::AddProperty(
+          async_function_prototype, factory->constructor_string(),
+          async_function_constructor,
+          static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+      JSFunction::SetPrototype(async_function_constructor,
+                               async_function_prototype);
+
+      Handle<JSFunction> async_function_next =
+          SimpleInstallFunction(container, "AsyncFunctionNext",
+                                Builtins::kGeneratorPrototypeNext, 2, false);
+      Handle<JSFunction> async_function_throw =
+          SimpleInstallFunction(container, "AsyncFunctionThrow",
+                                Builtins::kGeneratorPrototypeThrow, 2, false);
+      async_function_next->shared()->set_native(true);
+      async_function_throw->shared()->set_native(true);
+    }
   }
 }
 
@@ -2358,6 +2494,7 @@
   }
 
   INITIALIZE_FLAG(FLAG_harmony_species)
+  INITIALIZE_FLAG(FLAG_intl_extra)
 
 #undef INITIALIZE_FLAG
 }
@@ -2366,12 +2503,9 @@
 #define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
   void Genesis::InitializeGlobal_##id() {}
 
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_for_in)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_exec)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
@@ -2379,11 +2513,17 @@
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(intl_extra)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_explicit_tailcalls)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_exponentiation_operator)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
+#ifdef V8_I18N_SUPPORT
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
+#endif
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_async_await)
 
 void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
                          const char* name, Handle<Symbol> value) {
@@ -2416,9 +2556,26 @@
   if (!FLAG_harmony_sharedarraybuffer) return;
 
   Handle<JSGlobalObject> global(native_context()->global_object());
+  Isolate* isolate = global->GetIsolate();
+  Factory* factory = isolate->factory();
+
   Handle<JSFunction> shared_array_buffer_fun =
       InstallArrayBuffer(global, "SharedArrayBuffer");
   native_context()->set_shared_array_buffer_fun(*shared_array_buffer_fun);
+
+  Handle<String> name = factory->InternalizeUtf8String("Atomics");
+  Handle<JSFunction> cons = factory->NewFunction(name);
+  JSFunction::SetInstancePrototype(
+      cons,
+      Handle<Object>(native_context()->initial_object_prototype(), isolate));
+  Handle<JSObject> atomics_object = factory->NewJSObject(cons, TENURED);
+  DCHECK(atomics_object->IsJSObject());
+  JSObject::AddProperty(global, name, atomics_object, DONT_ENUM);
+
+  SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("load"),
+                        Builtins::kAtomicsLoad, 2, true);
+  SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("store"),
+                        Builtins::kAtomicsStore, 3, true);
 }
 
 
@@ -2665,11 +2822,22 @@
   native_context()->set_string_function_prototype_map(
       HeapObject::cast(string_function->initial_map()->prototype())->map());
 
+  Handle<JSGlobalObject> global_object =
+      handle(native_context()->global_object());
+
+  // Install Global.encodeURI.
+  SimpleInstallFunction(global_object, "encodeURI", Builtins::kGlobalEncodeURI,
+                        1, false);
+
+  // Install Global.encodeURIComponent.
+  SimpleInstallFunction(global_object, "encodeURIComponent",
+                        Builtins::kGlobalEncodeURIComponent, 1, false);
+
   // Install Global.eval.
   {
-    Handle<JSFunction> eval = SimpleInstallFunction(
-        handle(native_context()->global_object()), factory()->eval_string(),
-        Builtins::kGlobalEval, 1, false);
+    Handle<JSFunction> eval =
+        SimpleInstallFunction(global_object, factory()->eval_string(),
+                              Builtins::kGlobalEval, 1, false);
     native_context()->set_global_eval_fun(*eval);
   }
 
@@ -2712,8 +2880,7 @@
   {
     Handle<String> key = factory()->Promise_string();
     Handle<JSFunction> function = Handle<JSFunction>::cast(
-        JSReceiver::GetProperty(handle(native_context()->global_object()), key)
-            .ToHandleChecked());
+        JSReceiver::GetProperty(global_object, key).ToHandleChecked());
     JSFunction::EnsureHasInitialMap(function);
     function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
     function->shared()->set_construct_stub(
@@ -2724,37 +2891,6 @@
 
   InstallBuiltinFunctionIds();
 
-  // Also install builtin function ids to some generator object methods. These
-  // three methods use the three resume operations (Runtime_GeneratorNext,
-  // Runtime_GeneratorReturn, Runtime_GeneratorThrow) respectively. Those
-  // operations are not supported by Crankshaft, TurboFan, nor Ignition.
-  {
-    Handle<JSObject> generator_object_prototype(JSObject::cast(
-        native_context()->generator_object_prototype_map()->prototype()));
-
-    {  // GeneratorObject.prototype.next
-      Handle<String> key = factory()->next_string();
-      Handle<JSFunction> function = Handle<JSFunction>::cast(
-          JSReceiver::GetProperty(generator_object_prototype, key)
-              .ToHandleChecked());
-      function->shared()->set_builtin_function_id(kGeneratorObjectNext);
-    }
-    {  // GeneratorObject.prototype.return
-      Handle<String> key = factory()->NewStringFromAsciiChecked("return");
-      Handle<JSFunction> function = Handle<JSFunction>::cast(
-          JSReceiver::GetProperty(generator_object_prototype, key)
-              .ToHandleChecked());
-      function->shared()->set_builtin_function_id(kGeneratorObjectReturn);
-    }
-    {  // GeneratorObject.prototype.throw
-      Handle<String> key = factory()->throw_string();
-      Handle<JSFunction> function = Handle<JSFunction>::cast(
-          JSReceiver::GetProperty(generator_object_prototype, key)
-              .ToHandleChecked());
-      function->shared()->set_builtin_function_id(kGeneratorObjectThrow);
-    }
-  }
-
   // Create a map for accessor property descriptors (a variant of JSObject
   // that predefines four properties get, set, configurable and enumerable).
   {
@@ -2936,21 +3072,18 @@
 
 bool Genesis::InstallExperimentalNatives() {
   static const char* harmony_iterator_close_natives[] = {nullptr};
-  static const char* harmony_sloppy_natives[] = {nullptr};
-  static const char* harmony_sloppy_function_natives[] = {nullptr};
-  static const char* harmony_sloppy_let_natives[] = {nullptr};
   static const char* harmony_species_natives[] = {"native harmony-species.js",
                                                   nullptr};
+  static const char* harmony_explicit_tailcalls_natives[] = {nullptr};
   static const char* harmony_tailcalls_natives[] = {nullptr};
   static const char* harmony_unicode_regexps_natives[] = {
       "native harmony-unicode-regexps.js", nullptr};
-  static const char* harmony_object_observe_natives[] = {
-      "native harmony-object-observe.js", nullptr};
   static const char* harmony_sharedarraybuffer_natives[] = {
       "native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
   static const char* harmony_simd_natives[] = {"native harmony-simd.js",
                                                nullptr};
   static const char* harmony_do_expressions_natives[] = {nullptr};
+  static const char* harmony_for_in_natives[] = {nullptr};
   static const char* harmony_regexp_exec_natives[] = {
       "native harmony-regexp-exec.js", nullptr};
   static const char* harmony_regexp_subclass_natives[] = {nullptr};
@@ -2962,6 +3095,7 @@
   static const char* harmony_function_sent_natives[] = {nullptr};
   static const char* promise_extra_natives[] = {"native promise-extra.js",
                                                 nullptr};
+  static const char* intl_extra_natives[] = {"native intl-extra.js", nullptr};
   static const char* harmony_object_values_entries_natives[] = {nullptr};
   static const char* harmony_object_own_property_descriptors_natives[] = {
       nullptr};
@@ -2969,6 +3103,12 @@
   static const char* harmony_exponentiation_operator_natives[] = {nullptr};
   static const char* harmony_string_padding_natives[] = {
       "native harmony-string-padding.js", nullptr};
+#ifdef V8_I18N_SUPPORT
+  static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
+                                                   nullptr};
+#endif
+  static const char* harmony_async_await_natives[] = {
+      "native harmony-async-await.js", nullptr};
 
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2987,6 +3127,7 @@
     HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
     HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
     HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
+    INSTALL_EXPERIMENTAL_NATIVES(intl_extra, "");
     INSTALL_EXPERIMENTAL_NATIVES(promise_extra, "");
 #undef INSTALL_EXPERIMENTAL_NATIVES
   }
@@ -3179,17 +3320,20 @@
   Isolate* isolate = native_context->GetIsolate();
   ExtensionStates extension_states;  // All extensions have state UNVISITED.
   return InstallAutoExtensions(isolate, &extension_states) &&
-      (!FLAG_expose_free_buffer ||
-       InstallExtension(isolate, "v8/free-buffer", &extension_states)) &&
-      (!FLAG_expose_gc ||
-       InstallExtension(isolate, "v8/gc", &extension_states)) &&
-      (!FLAG_expose_externalize_string ||
-       InstallExtension(isolate, "v8/externalize", &extension_states)) &&
-      (!FLAG_track_gc_object_stats ||
-       InstallExtension(isolate, "v8/statistics", &extension_states)) &&
-      (!FLAG_expose_trigger_failure ||
-       InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
-      InstallRequestedExtensions(isolate, extensions, &extension_states);
+         (!FLAG_expose_free_buffer ||
+          InstallExtension(isolate, "v8/free-buffer", &extension_states)) &&
+         (!FLAG_expose_gc ||
+          InstallExtension(isolate, "v8/gc", &extension_states)) &&
+         (!FLAG_expose_externalize_string ||
+          InstallExtension(isolate, "v8/externalize", &extension_states)) &&
+         (!FLAG_track_gc_object_stats ||
+          InstallExtension(isolate, "v8/statistics", &extension_states)) &&
+         (!FLAG_expose_trigger_failure ||
+          InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
+         (!(FLAG_ignition && FLAG_trace_ignition_dispatches) ||
+          InstallExtension(isolate, "v8/ignition-statistics",
+                           &extension_states)) &&
+         InstallRequestedExtensions(isolate, extensions, &extension_states);
 }
 
 
@@ -3559,7 +3703,8 @@
     CreateRoots();
     Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
     CreateStrictModeFunctionMaps(empty_function);
-    CreateIteratorMaps();
+    CreateIteratorMaps(empty_function);
+    CreateAsyncFunctionMaps(empty_function);
     Handle<JSGlobalObject> global_object =
         CreateNewGlobals(global_proxy_template, global_proxy);
     HookUpGlobalProxy(global_object, global_proxy);
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index d1bf201..5563eea 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -136,6 +136,7 @@
   static v8::Extension* externalize_string_extension_;
   static v8::Extension* statistics_extension_;
   static v8::Extension* trigger_failure_extension_;
+  static v8::Extension* ignition_statistics_extension_;
 
   DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
 };
diff --git a/src/builtins.cc b/src/builtins.cc
index 9c3ff59..75f6150 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -4,13 +4,13 @@
 
 #include "src/builtins.h"
 
-#include "src/api.h"
 #include "src/api-arguments.h"
 #include "src/api-natives.h"
+#include "src/api.h"
 #include "src/base/once.h"
 #include "src/bootstrapper.h"
 #include "src/code-factory.h"
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
 #include "src/dateparser-inl.h"
 #include "src/elements.h"
 #include "src/frames-inl.h"
@@ -23,6 +23,7 @@
 #include "src/property-descriptor.h"
 #include "src/prototype.h"
 #include "src/string-builder.h"
+#include "src/uri.h"
 #include "src/vm-state-inl.h"
 
 namespace v8 {
@@ -41,12 +42,12 @@
   }
 
   Object*& operator[] (int index) {
-    DCHECK(index < length());
+    DCHECK_LT(index, length());
     return Arguments::operator[](index);
   }
 
   template <class S> Handle<S> at(int index) {
-    DCHECK(index < length());
+    DCHECK_LT(index, length());
     return Arguments::at<S>(index);
   }
 
@@ -138,25 +139,28 @@
 //
 // In the body of the builtin function the arguments can be accessed
 // through the BuiltinArguments object args.
-
+// TODO(cbruni): add global flag to check whether any tracing events have been
+// enabled.
 #define BUILTIN(name)                                                          \
   MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
                                                      Isolate* isolate);        \
-  MUST_USE_RESULT static Object* Builtin_##name(                               \
+                                                                               \
+  V8_NOINLINE static Object* Builtin_Impl_Stats_##name(                        \
       int args_length, Object** args_object, Isolate* isolate) {               \
-    Object* value;                                                             \
-    isolate->counters()->runtime_calls()->Increment();                         \
+    name##ArgumentsType args(args_length, args_object);                        \
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name);   \
     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                      \
                  "V8.Builtin_" #name);                                         \
-    name##ArgumentsType args(args_length, args_object);                        \
+    return Builtin_Impl_##name(args, isolate);                                 \
+  }                                                                            \
+                                                                               \
+  MUST_USE_RESULT static Object* Builtin_##name(                               \
+      int args_length, Object** args_object, Isolate* isolate) {               \
     if (FLAG_runtime_call_stats) {                                             \
-      RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();     \
-      RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name);            \
-      value = Builtin_Impl_##name(args, isolate);                              \
-    } else {                                                                   \
-      value = Builtin_Impl_##name(args, isolate);                              \
+      return Builtin_Impl_Stats_##name(args_length, args_object, isolate);     \
     }                                                                          \
-    return value;                                                              \
+    name##ArgumentsType args(args_length, args_object);                        \
+    return Builtin_Impl_##name(args, isolate);                                 \
   }                                                                            \
                                                                                \
   MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
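
Hand-expanded for a hypothetical BUILTIN(Foo), the macro above now yields
roughly the following (a sketch, not actual preprocessor output). The point of
the split is that the instrumented path lives in a separate V8_NOINLINE
function, so the common path with FLAG_runtime_call_stats off stays short:

  MUST_USE_RESULT static Object* Builtin_Impl_Foo(FooArgumentsType args,
                                                  Isolate* isolate);

  V8_NOINLINE static Object* Builtin_Impl_Stats_Foo(
      int args_length, Object** args_object, Isolate* isolate) {
    FooArgumentsType args(args_length, args_object);
    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_Foo);
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8.Builtin_Foo");
    return Builtin_Impl_Foo(args, isolate);
  }

  MUST_USE_RESULT static Object* Builtin_Foo(
      int args_length, Object** args_object, Isolate* isolate) {
    if (FLAG_runtime_call_stats) {
      return Builtin_Impl_Stats_Foo(args_length, args_object, isolate);
    }
    FooArgumentsType args(args_length, args_object);
    return Builtin_Impl_Foo(args, isolate);
  }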
@@ -164,7 +168,6 @@
 
 // ----------------------------------------------------------------------------
 
-
 #define CHECK_RECEIVER(Type, name, method)                                  \
   if (!args.receiver()->Is##Type()) {                                       \
     THROW_NEW_ERROR_RETURN_FAILURE(                                         \
@@ -207,9 +210,14 @@
 
 inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
                                      int* out) {
-  Map* arguments_map = isolate->native_context()->sloppy_arguments_map();
-  if (object->map() != arguments_map) return false;
-  DCHECK(object->HasFastElements());
+  Context* context = *isolate->native_context();
+  Map* map = object->map();
+  if (map != context->sloppy_arguments_map() &&
+      map != context->strict_arguments_map() &&
+      map != context->fast_aliased_arguments_map()) {
+    return false;
+  }
+  DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
   Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
   if (!len_obj->IsSmi()) return false;
   *out = Max(0, Smi::cast(len_obj)->value());
@@ -269,7 +277,6 @@
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
   ElementsKind origin_kind = array->GetElementsKind();
   if (IsDictionaryElementsKind(origin_kind)) return false;
-  if (array->map()->is_observed()) return false;
   if (!array->map()->is_extensible()) return false;
   if (args == nullptr) return true;
 
@@ -344,11 +351,43 @@
 
 BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
 
-void Builtins::Generate_ObjectHasOwnProperty(
-    compiler::CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIsArray(CodeStubAssembler* assembler) {
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Label Label;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+
+  Node* object = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  Label call_runtime(assembler), return_true(assembler),
+      return_false(assembler);
+
+  assembler->GotoIf(assembler->WordIsSmi(object), &return_false);
+  Node* instance_type = assembler->LoadInstanceType(object);
+
+  assembler->GotoIf(assembler->Word32Equal(
+                        instance_type, assembler->Int32Constant(JS_ARRAY_TYPE)),
+                    &return_true);
+
+  // TODO(verwaest): Handle proxies in-place.
+  assembler->Branch(assembler->Word32Equal(
+                        instance_type, assembler->Int32Constant(JS_PROXY_TYPE)),
+                    &call_runtime, &return_false);
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+
+  assembler->Bind(&call_runtime);
+  assembler->Return(
+      assembler->CallRuntime(Runtime::kArrayIsArray, context, object));
+}
+
+void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* object = assembler->Parameter(0);
   Node* key = assembler->Parameter(1);
@@ -368,153 +407,17 @@
 
   Variable var_index(assembler, MachineRepresentation::kWord32);
 
-  Label if_keyissmi(assembler), if_keyisnotsmi(assembler),
-      keyisindex(assembler);
-  assembler->Branch(assembler->WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
-  assembler->Bind(&if_keyissmi);
-  {
-    // Negative smi keys are named properties. Handle in the runtime.
-    Label if_keyispositive(assembler);
-    assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositive,
-                      &call_runtime);
-    assembler->Bind(&if_keyispositive);
+  Label keyisindex(assembler), if_iskeyunique(assembler);
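+  // TryToName dispatches on the key (contract inferred from the code it
+  // replaces below): array-index keys jump to &keyisindex with var_index
+  // bound, symbols and internalized strings jump to &if_iskeyunique, and
+  // everything else bails out to &call_runtime.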
+  assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
+                       &call_runtime);
 
-    var_index.Bind(assembler->SmiUntag(key));
-    assembler->Goto(&keyisindex);
-  }
-
-  assembler->Bind(&if_keyisnotsmi);
-
-  Node* key_instance_type = assembler->LoadInstanceType(key);
-  Label if_iskeyunique(assembler), if_iskeynotsymbol(assembler);
-  assembler->Branch(
-      assembler->Word32Equal(key_instance_type,
-                             assembler->Int32Constant(SYMBOL_TYPE)),
-      &if_iskeyunique, &if_iskeynotsymbol);
-  assembler->Bind(&if_iskeynotsymbol);
-  {
-    Label if_iskeyinternalized(assembler);
-    Node* bits = assembler->WordAnd(
-        key_instance_type,
-        assembler->Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
-    assembler->Branch(
-        assembler->Word32Equal(
-            bits, assembler->Int32Constant(kStringTag | kInternalizedTag)),
-        &if_iskeyinternalized, &call_runtime);
-    assembler->Bind(&if_iskeyinternalized);
-
-    // Check whether the key is an array index passed in as string. Handle
-    // uniform with smi keys if so.
-    // TODO(verwaest): Also support non-internalized strings.
-    Node* hash = assembler->LoadNameHash(key);
-    Node* bit = assembler->Word32And(
-        hash, assembler->Int32Constant(internal::Name::kIsNotArrayIndexMask));
-    Label if_isarrayindex(assembler);
-    assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
-                      &if_isarrayindex, &if_iskeyunique);
-    assembler->Bind(&if_isarrayindex);
-    var_index.Bind(
-        assembler->BitFieldDecode<internal::Name::ArrayIndexValueBits>(hash));
-    assembler->Goto(&keyisindex);
-  }
   assembler->Bind(&if_iskeyunique);
-
-  {
-    Label if_objectissimple(assembler);
-    assembler->Branch(assembler->Int32LessThanOrEqual(
-                          instance_type,
-                          assembler->Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
-                      &call_runtime, &if_objectissimple);
-    assembler->Bind(&if_objectissimple);
-  }
-
-  // TODO(verwaest): Perform a dictonary lookup on slow-mode receivers.
-  Node* bit_field3 = assembler->LoadMapBitField3(map);
-  Node* bit = assembler->BitFieldDecode<Map::DictionaryMap>(bit_field3);
-  Label if_isfastmap(assembler);
-  assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
-                    &if_isfastmap, &call_runtime);
-  assembler->Bind(&if_isfastmap);
-  Node* nof =
-      assembler->BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
-  // Bail out to the runtime for large numbers of own descriptors. The stub only
-  // does linear search, which becomes too expensive in that case.
-  {
-    static const int32_t kMaxLinear = 256;
-    Label above_max(assembler), below_max(assembler);
-    assembler->Branch(assembler->Int32LessThanOrEqual(
-                          nof, assembler->Int32Constant(kMaxLinear)),
-                      &below_max, &call_runtime);
-    assembler->Bind(&below_max);
-  }
-  Node* descriptors = assembler->LoadMapDescriptors(map);
-
-  Variable var_descriptor(assembler, MachineRepresentation::kWord32);
-  Label loop(assembler, &var_descriptor);
-  var_descriptor.Bind(assembler->Int32Constant(0));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
-  {
-    Node* index = var_descriptor.value();
-    Node* offset = assembler->Int32Constant(DescriptorArray::ToKeyIndex(0));
-    Node* factor = assembler->Int32Constant(DescriptorArray::kDescriptorSize);
-    Label if_notdone(assembler);
-    assembler->Branch(assembler->Word32Equal(index, nof), &return_false,
-                      &if_notdone);
-    assembler->Bind(&if_notdone);
-    {
-      Node* array_index =
-          assembler->Int32Add(offset, assembler->Int32Mul(index, factor));
-      Node* current =
-          assembler->LoadFixedArrayElementInt32Index(descriptors, array_index);
-      Label if_unequal(assembler);
-      assembler->Branch(assembler->WordEqual(current, key), &return_true,
-                        &if_unequal);
-      assembler->Bind(&if_unequal);
-
-      var_descriptor.Bind(
-          assembler->Int32Add(index, assembler->Int32Constant(1)));
-      assembler->Goto(&loop);
-    }
-  }
+  assembler->TryLookupProperty(object, map, instance_type, key, &return_true,
+                               &return_false, &call_runtime);
 
   assembler->Bind(&keyisindex);
-  {
-    Label if_objectissimple(assembler);
-    assembler->Branch(assembler->Int32LessThanOrEqual(
-                          instance_type, assembler->Int32Constant(
-                                             LAST_CUSTOM_ELEMENTS_RECEIVER)),
-                      &call_runtime, &if_objectissimple);
-    assembler->Bind(&if_objectissimple);
-  }
-
-  Node* index = var_index.value();
-  Node* bit_field2 = assembler->LoadMapBitField2(map);
-  Node* elements_kind =
-      assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
-
-  // TODO(verwaest): Support other elements kinds as well.
-  Label if_isobjectorsmi(assembler);
-  assembler->Branch(
-      assembler->Int32LessThanOrEqual(
-          elements_kind, assembler->Int32Constant(FAST_HOLEY_ELEMENTS)),
-      &if_isobjectorsmi, &call_runtime);
-  assembler->Bind(&if_isobjectorsmi);
-  {
-    Node* elements = assembler->LoadElements(object);
-    Node* length = assembler->LoadFixedArrayBaseLength(elements);
-
-    Label if_iskeyinrange(assembler);
-    assembler->Branch(
-        assembler->Int32LessThan(index, assembler->SmiToWord32(length)),
-        &if_iskeyinrange, &return_false);
-
-    assembler->Bind(&if_iskeyinrange);
-    Node* element = assembler->LoadFixedArrayElementInt32Index(elements, index);
-    Node* the_hole = assembler->LoadRoot(Heap::kTheHoleValueRootIndex);
-    assembler->Branch(assembler->WordEqual(element, the_hole), &return_false,
-                      &return_true);
-  }
+  assembler->TryLookupElement(object, map, instance_type, var_index.value(),
+                              &return_true, &return_false, &call_runtime);
 
   assembler->Bind(&return_true);
   assembler->Return(assembler->BooleanConstant(true));
@@ -577,7 +480,6 @@
   }
 
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  DCHECK(!array->map()->is_observed());
 
   uint32_t len = static_cast<uint32_t>(Smi::cast(array->length())->value());
   if (len == 0) return isolate->heap()->undefined_value();
@@ -610,7 +512,6 @@
     return CallJsIntrinsic(isolate, isolate->array_shift(), args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  DCHECK(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -631,7 +532,6 @@
     return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  DCHECK(!array->map()->is_observed());
   int to_add = args.length() - 1;
   if (to_add == 0) return array->length();
 
@@ -670,10 +570,11 @@
   } else if (receiver->IsJSObject() &&
              GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
                                       &len)) {
-    DCHECK_EQ(FAST_ELEMENTS, JSObject::cast(*receiver)->GetElementsKind());
-    // Array.prototype.slice(arguments, ...) is quite a common idiom
+    // Array.prototype.slice.call(arguments, ...) is quite a common idiom
     // (notably more than 50% of invocations in Web apps).
     // Treat it in C++ as well.
+    DCHECK(JSObject::cast(*receiver)->HasFastElements() ||
+           JSObject::cast(*receiver)->HasFastArgumentsElements());
   } else {
     AllowHeapAllocation allow_allocation;
     return CallJsIntrinsic(isolate, isolate->array_slice(), args);
@@ -729,7 +630,6 @@
     return CallJsIntrinsic(isolate, isolate->array_splice(), args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  DCHECK(!array->map()->is_observed());
 
   int argument_count = args.length() - 1;
   int relative_start = 0;
@@ -1323,23 +1223,18 @@
   return true;
 }
 
-
-bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
-  Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
-  Maybe<bool> maybe = JSReceiver::HasProperty(obj, key);
-  return maybe.FromMaybe(false);
-}
-
-
 static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
   HandleScope handle_scope(isolate);
   if (!obj->IsJSReceiver()) return Just(false);
-  Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
-  Handle<Object> value;
-  MaybeHandle<Object> maybeValue =
-      i::Runtime::GetObjectProperty(isolate, obj, key);
-  if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
-  if (!value->IsUndefined()) return Just(value->BooleanValue());
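+  // IsIsConcatSpreadableLookupChainIntact() reads a protector that is
+  // expected to remain valid until some script defines @@isConcatSpreadable
+  // on the relevant lookup chain; while it holds, spreadability reduces to
+  // the plain IsArray() check below, skipping the symbol lookup entirely.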
+  if (!isolate->IsIsConcatSpreadableLookupChainIntact()) {
+    // Slow path if @@isConcatSpreadable has been used.
+    Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+    Handle<Object> value;
+    MaybeHandle<Object> maybeValue =
+        i::Runtime::GetObjectProperty(isolate, obj, key);
+    if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
+    if (!value->IsUndefined()) return Just(value->BooleanValue());
+  }
   return Object::IsArray(obj);
 }
 
@@ -1527,8 +1422,24 @@
   }
 }
 
+bool IsSimpleArray(Isolate* isolate, Handle<JSArray> obj) {
+  DisallowHeapAllocation no_gc;
+  Map* map = obj->map();
+  // If there is only the 'length' property we are fine.
+  if (map->prototype() ==
+          isolate->native_context()->initial_array_prototype() &&
+      map->NumberOfOwnDescriptors() == 1) {
+    return true;
+  }
+  // TODO(cbruni): slower lookup for array subclasses and support slow
+  // @@isConcatSpreadable lookup.
+  return false;
+}
 
 MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
+  if (!isolate->IsIsConcatSpreadableLookupChainIntact()) {
+    return MaybeHandle<JSArray>();
+  }
   // We shouldn't overflow when adding another len.
   const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
   STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
@@ -1544,14 +1455,15 @@
     for (int i = 0; i < n_arguments; i++) {
       Object* arg = (*args)[i];
       if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
-      if (!JSObject::cast(arg)->HasFastElements()) {
-        return MaybeHandle<JSArray>();
-      }
       if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
         return MaybeHandle<JSArray>();
       }
+      // TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
+      if (!JSObject::cast(arg)->HasFastElements()) {
+        return MaybeHandle<JSArray>();
+      }
       Handle<JSArray> array(JSArray::cast(arg), isolate);
-      if (HasConcatSpreadableModifier(isolate, array)) {
+      if (!IsSimpleArray(isolate, array)) {
         return MaybeHandle<JSArray>();
       }
       // The Array length is guaranteed to be <= kHalfOfMaxInt, thus we won't
@@ -1559,16 +1471,15 @@
       result_len += Smi::cast(array->length())->value();
       DCHECK(result_len >= 0);
       // Throw an Error if we overflow the FixedArray limits
-      if (FixedDoubleArray::kMaxLength < result_len ||
-          FixedArray::kMaxLength < result_len) {
-        AllowHeapAllocation allow_gc;
+      if (FixedArray::kMaxLength < result_len) {
+        AllowHeapAllocation gc;
         THROW_NEW_ERROR(isolate,
                         NewRangeError(MessageTemplate::kInvalidArrayLength),
                         JSArray);
       }
     }
   }
-  return ElementsAccessor::Concat(isolate, args, n_arguments);
+  return ElementsAccessor::Concat(isolate, args, n_arguments, result_len);
 }
 
 }  // namespace
@@ -1616,16 +1527,6 @@
 }
 
 
-// ES6 22.1.2.2 Array.isArray
-BUILTIN(ArrayIsArray) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  Handle<Object> object = args.at<Object>(1);
-  Maybe<bool> result = Object::IsArray(object);
-  MAYBE_RETURN(result, isolate->heap()->exception());
-  return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
 namespace {
 
 MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
@@ -1637,6 +1538,14 @@
                 String::cast(*next_source)->length() == 0);
   }
 
+  // If the target is deprecated, the object will be updated on first store. If
+  // the source for that store equals the target, this will invalidate the
+  // cached representation of the source. Preventively upgrade the target.
+  // Do this on each iteration since any property load could cause deprecation.
+  if (to->map()->is_deprecated()) {
+    JSObject::MigrateInstance(Handle<JSObject>::cast(to));
+  }
+
   Isolate* isolate = to->GetIsolate();
   Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
 
@@ -1789,6 +1698,156 @@
   return *object;
 }
 
+// ES6 section 19.1.2.3 Object.defineProperties
+BUILTIN(ObjectDefineProperties) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> properties = args.at<Object>(2);
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      JSReceiver::DefineProperties(isolate, target, properties));
+  return *result;
+}
+
+// ES6 section 19.1.2.4 Object.defineProperty
+BUILTIN(ObjectDefineProperty) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(4, args.length());
+  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> attributes = args.at<Object>(3);
+
+  return JSReceiver::DefineProperty(isolate, target, key, attributes);
+}
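Usage sketch for the two builtins above (plain ES6 semantics, nothing patch-specific):

    const o = {};
    Object.defineProperty(o, 'x', { value: 1 });  // non-writable by default
    Object.defineProperties(o, {
      y: { value: 2 },
      z: { get() { return o.x + o.y; }, enumerable: true },
    });
    o.z;  // 3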
+
+namespace {
+
+template <AccessorComponent which_accessor>
+Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
+                             Handle<Object> name, Handle<Object> accessor) {
+  // 1. Let O be ? ToObject(this value).
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ConvertReceiver(isolate, object));
+  // 2. If IsCallable(getter) is false, throw a TypeError exception.
+  if (!accessor->IsCallable()) {
+    MessageTemplate::Template message =
+        which_accessor == ACCESSOR_GETTER
+            ? MessageTemplate::kObjectGetterExpectingFunction
+            : MessageTemplate::kObjectSetterExpectingFunction;
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message));
+  }
+  // 3. Let desc be PropertyDescriptor{[[Get]]: getter, [[Enumerable]]: true,
+  //                                   [[Configurable]]: true}.
+  PropertyDescriptor desc;
+  if (which_accessor == ACCESSOR_GETTER) {
+    desc.set_get(accessor);
+  } else {
+    DCHECK(which_accessor == ACCESSOR_SETTER);
+    desc.set_set(accessor);
+  }
+  desc.set_enumerable(true);
+  desc.set_configurable(true);
+  // 4. Let key be ? ToPropertyKey(P).
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+                                     Object::ToPropertyKey(isolate, name));
+  // 5. Perform ? DefinePropertyOrThrow(O, key, desc).
+  // To preserve legacy behavior, we ignore errors silently rather than
+  // throwing an exception.
+  Maybe<bool> success = JSReceiver::DefineOwnProperty(
+      isolate, receiver, name, &desc, Object::DONT_THROW);
+  MAYBE_RETURN(success, isolate->heap()->exception());
+  // 6. Return undefined.
+  return isolate->heap()->undefined_value();
+}
+
+Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
+                             Handle<Object> key, AccessorComponent component) {
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+                                     Object::ConvertReceiver(isolate, object));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+                                     Object::ToPropertyKey(isolate, key));
+  bool success = false;
+  LookupIterator it = LookupIterator::PropertyOrElement(
+      isolate, object, key, &success,
+      LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+  DCHECK(success);
+
+  for (; it.IsFound(); it.Next()) {
+    switch (it.state()) {
+      case LookupIterator::INTERCEPTOR:
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+
+      case LookupIterator::ACCESS_CHECK:
+        if (it.HasAccess()) continue;
+        isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+        RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+        return isolate->heap()->undefined_value();
+
+      case LookupIterator::JSPROXY:
+        return isolate->heap()->undefined_value();
+
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
+        return isolate->heap()->undefined_value();
+      case LookupIterator::DATA:
+        continue;
+      case LookupIterator::ACCESSOR: {
+        Handle<Object> maybe_pair = it.GetAccessors();
+        if (maybe_pair->IsAccessorPair()) {
+          return *AccessorPair::GetComponent(
+              Handle<AccessorPair>::cast(maybe_pair), component);
+        }
+      }
+    }
+  }
+
+  return isolate->heap()->undefined_value();
+}
+
+}  // namespace
+
+// ES6 B.2.2.2 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__defineGetter__
+BUILTIN(ObjectDefineGetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);  // Receiver.
+  Handle<Object> name = args.at<Object>(1);
+  Handle<Object> getter = args.at<Object>(2);
+  return ObjectDefineAccessor<ACCESSOR_GETTER>(isolate, object, name, getter);
+}
+
+// ES6 B.2.2.3 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__defineSetter__
+BUILTIN(ObjectDefineSetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);  // Receiver.
+  Handle<Object> name = args.at<Object>(1);
+  Handle<Object> setter = args.at<Object>(2);
+  return ObjectDefineAccessor<ACCESSOR_SETTER>(isolate, object, name, setter);
+}
+
+// ES6 B.2.2.4 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupGetter__
+BUILTIN(ObjectLookupGetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> name = args.at<Object>(1);
+  return ObjectLookupAccessor(isolate, object, name, ACCESSOR_GETTER);
+}
+
+// ES6 B.2.2.5 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupSetter__
+BUILTIN(ObjectLookupSetter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> name = args.at<Object>(1);
+  return ObjectLookupAccessor(isolate, object, name, ACCESSOR_SETTER);
+}
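Behavior sketch for the four legacy accessors above; note that, per the DONT_THROW deviation in ObjectDefineAccessor, a failed redefinition is silently ignored rather than thrown:

    const o = {};
    o.__defineGetter__('now', () => Date.now());
    typeof o.now;               // 'number'
    o.__lookupGetter__('now');  // the arrow function installed above
    Object.freeze(o);
    o.__defineGetter__('later', () => 0);  // silently ignored, no TypeError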
 
 // ES6 section 19.1.2.5 Object.freeze ( O )
 BUILTIN(ObjectFreeze) {
@@ -1803,6 +1862,23 @@
 }
 
 
+// ES section 19.1.2.9 Object.getPrototypeOf ( O )
+BUILTIN(ObjectGetPrototypeOf) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, receiver, Object::ToObject(isolate, object));
+
+  Handle<Object> prototype;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, prototype, JSReceiver::GetPrototype(isolate, receiver));
+
+  return *prototype;
+}
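Because the receiver goes through ToObject, primitives are coerced rather than rejected (an ES6 change from ES5):

    Object.getPrototypeOf('abc') === String.prototype;  // true
    Object.getPrototypeOf(null);                        // TypeError from ToObject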
+
+
 // ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
 BUILTIN(ObjectGetOwnPropertyDescriptor) {
   HandleScope scope(isolate);
@@ -2031,6 +2107,26 @@
   return *object;
 }
 
+// ES6 section 18.2.6.4 encodeURI (uri)
+BUILTIN(GlobalEncodeURI) {
+  HandleScope scope(isolate);
+  Handle<String> uri;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, uri, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  return Uri::EncodeUri(isolate, uri);
+}
+
+// ES6 section 18.2.6.5 encodeURIComponent (uriComponent)
+BUILTIN(GlobalEncodeURIComponent) {
+  HandleScope scope(isolate);
+  Handle<String> uriComponent;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, uriComponent,
+      Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+  return Uri::EncodeUriComponent(isolate, uriComponent);
+}
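The difference between the two builtins (standard ES semantics, shown for context):

    encodeURI('https://example.com/a b?q=1&r=2');
    // 'https://example.com/a%20b?q=1&r=2'  -- URI-reserved characters kept
    encodeURIComponent('q=1&r=2');
    // 'q%3D1%26r%3D2'                      -- reserved characters escaped too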
 
 namespace {
 
@@ -2069,11 +2165,12 @@
   }
 
   // Compile source string in the native context.
-  Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared(),
-                                        isolate);
+  int eval_scope_position = 0;
+  int eval_position = RelocInfo::kNoPosition;
+  Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared());
   return Compiler::GetFunctionFromEval(source, outer_info, native_context,
-                                       SLOPPY, restriction,
-                                       RelocInfo::kNoPosition);
+                                       SLOPPY, restriction, eval_scope_position,
+                                       eval_position);
 }
 
 }  // namespace
@@ -2135,12 +2232,11 @@
 namespace {
 
 void Generate_MathRoundingOperation(
-    compiler::CodeStubAssembler* assembler,
-    compiler::Node* (compiler::CodeStubAssembler::*float64op)(
-        compiler::Node*)) {
-  typedef compiler::CodeStubAssembler::Label Label;
+    CodeStubAssembler* assembler,
+    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* context = assembler->Parameter(4);
 
@@ -2197,16 +2293,15 @@
 }  // namespace
 
 // ES6 section 20.2.2.10 Math.ceil ( x )
-void Builtins::Generate_MathCeil(compiler::CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler,
-                                 &compiler::CodeStubAssembler::Float64Ceil);
+void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
 }
 
 // ES6 section 20.2.2.11 Math.clz32 ( x )
-void Builtins::Generate_MathClz32(compiler::CodeStubAssembler* assembler) {
-  typedef compiler::CodeStubAssembler::Label Label;
+void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* context = assembler->Parameter(4);
 
@@ -2271,9 +2366,8 @@
 }
 
 // ES6 section 20.2.2.16 Math.floor ( x )
-void Builtins::Generate_MathFloor(compiler::CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler,
-                                 &compiler::CodeStubAssembler::Float64Floor);
+void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
 }
 
 // ES6 section 20.2.2.17 Math.fround ( x )
@@ -2299,13 +2393,12 @@
 }
 
 // ES6 section 20.2.2.28 Math.round ( x )
-void Builtins::Generate_MathRound(compiler::CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler,
-                                 &compiler::CodeStubAssembler::Float64Round);
+void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
 }
 
 // ES6 section 20.2.2.32 Math.sqrt ( x )
-void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
+void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
   using compiler::Node;
 
   Node* x = assembler->Parameter(1);
@@ -2317,15 +2410,133 @@
 }
 
 // ES6 section 20.2.2.35 Math.trunc ( x )
-void Builtins::Generate_MathTrunc(compiler::CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler,
-                                 &compiler::CodeStubAssembler::Float64Trunc);
+void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
+  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
+}
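The four rounding builtins share Generate_MathRoundingOperation and differ only in the float64 operation passed in; their observable differences, for reference:

    [Math.ceil(-4.5), Math.floor(-4.5), Math.round(-4.5), Math.trunc(-4.5)];
    // [-4, -5, -4, -4] -- round() resolves halves toward +Infinity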
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.2 Function Objects
+
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
+void Builtins::Generate_FunctionPrototypeHasInstance(
+    CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* f = assembler->Parameter(0);
+  Node* v = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* result = assembler->OrdinaryHasInstance(context, f, v);
+  assembler->Return(result);
+}
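OrdinaryHasInstance is what the default instanceof dispatches to, so the stub above covers the common case:

    class C {}
    C[Symbol.hasInstance](new C());  // true -- prototype-chain walk
    new C() instanceof C;            // true -- dispatches through @@hasInstance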
+
+// -----------------------------------------------------------------------------
+// ES6 section 25.3 Generator Objects
+
+namespace {
+
+void Generate_GeneratorPrototypeResume(
+    CodeStubAssembler* assembler, JSGeneratorObject::ResumeMode resume_mode,
+    char const* const method_name) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* value = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+  Node* closed = assembler->SmiConstant(
+      Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+
+  // Check if the {receiver} is actually a JSGeneratorObject.
+  Label if_receiverisincompatible(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+  assembler->GotoUnless(assembler->Word32Equal(
+                            receiver_instance_type,
+                            assembler->Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
+                        &if_receiverisincompatible);
+
+  // Check if the {receiver} is running or already closed.
+  Node* receiver_continuation = assembler->LoadObjectField(
+      receiver, JSGeneratorObject::kContinuationOffset);
+  Label if_receiverisclosed(assembler, Label::kDeferred),
+      if_receiverisrunning(assembler, Label::kDeferred);
+  assembler->GotoIf(assembler->SmiEqual(receiver_continuation, closed),
+                    &if_receiverisclosed);
+  DCHECK_LT(JSGeneratorObject::kGeneratorExecuting,
+            JSGeneratorObject::kGeneratorClosed);
+  assembler->GotoIf(assembler->SmiLessThan(receiver_continuation, closed),
+                    &if_receiverisrunning);
+
+  // Resume the {receiver} using our trampoline.
+  Node* result = assembler->CallStub(
+      CodeFactory::ResumeGenerator(assembler->isolate()), context, value,
+      receiver, assembler->SmiConstant(Smi::FromInt(resume_mode)));
+  assembler->Return(result);
+
+  assembler->Bind(&if_receiverisincompatible);
+  {
+    // The {receiver} is not a valid JSGeneratorObject.
+    Node* result = assembler->CallRuntime(
+        Runtime::kThrowIncompatibleMethodReceiver, context,
+        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+            method_name, TENURED)),
+        receiver);
+    assembler->Return(result);  // Never reached.
+  }
+
+  assembler->Bind(&if_receiverisclosed);
+  {
+    // The {receiver} is closed already.
+    Node* result = nullptr;
+    switch (resume_mode) {
+      case JSGeneratorObject::kNext:
+        result = assembler->CallRuntime(Runtime::kCreateIterResultObject,
+                                        context, assembler->UndefinedConstant(),
+                                        assembler->BooleanConstant(true));
+        break;
+      case JSGeneratorObject::kReturn:
+        result =
+            assembler->CallRuntime(Runtime::kCreateIterResultObject, context,
+                                   value, assembler->BooleanConstant(true));
+        break;
+      case JSGeneratorObject::kThrow:
+        result = assembler->CallRuntime(Runtime::kThrow, context, value);
+        break;
+    }
+    assembler->Return(result);
+  }
+
+  assembler->Bind(&if_receiverisrunning);
+  {
+    Node* result =
+        assembler->CallRuntime(Runtime::kThrowGeneratorRunning, context);
+    assembler->Return(result);  // Never reached.
+  }
+}
+
+}  // namespace
+
+// ES6 section 25.3.1.2 Generator.prototype.next ( value )
+void Builtins::Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler) {
+  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kNext,
+                                    "[Generator].prototype.next");
+}
+
+// ES6 section 25.3.1.3 Generator.prototype.return ( value )
+void Builtins::Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler) {
+  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kReturn,
+                                    "[Generator].prototype.return");
+}
+
+// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
+void Builtins::Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler) {
+  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kThrow,
+                                    "[Generator].prototype.throw");
 }
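The closed-generator branches above are observable as follows (illustrative JavaScript):

    function* g() { yield 1; }
    const it = g();
    it.next();     // { value: 1, done: false }
    it.next();     // { value: undefined, done: true } -- generator now closed
    it.next();     // { value: undefined, done: true } -- kNext on closed
    it.return(7);  // { value: 7, done: true }         -- kReturn echoes value
    // it.throw(e) on a closed generator rethrows e (kThrow -> Runtime::kThrow)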
 
 // -----------------------------------------------------------------------------
 // ES6 section 26.1 The Reflect Object
 
-
 // ES6 section 26.1.3 Reflect.defineProperty
 BUILTIN(ReflectDefineProperty) {
   HandleScope scope(isolate);
@@ -2971,8 +3182,7 @@
   HandleScope scope(isolate);
   double const time_val = JSDate::CurrentTimeValue(isolate);
   char buffer[128];
-  Vector<char> str(buffer, arraysize(buffer));
-  ToDateString(time_val, str, isolate->date_cache());
+  ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
   Handle<String> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
@@ -3555,8 +3765,8 @@
   HandleScope scope(isolate);
   CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
   char buffer[128];
-  Vector<char> str(buffer, arraysize(buffer));
-  ToDateString(date->value()->Number(), str, isolate->date_cache(), kDateOnly);
+  ToDateString(date->value()->Number(), ArrayVector(buffer),
+               isolate->date_cache(), kDateOnly);
   Handle<String> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
@@ -3579,18 +3789,17 @@
   isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
                                        &hour, &min, &sec, &ms);
   char buffer[128];
-  Vector<char> str(buffer, arraysize(buffer));
   if (year >= 0 && year <= 9999) {
-    SNPrintF(str, "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
-             hour, min, sec, ms);
+    SNPrintF(ArrayVector(buffer), "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", year,
+             month + 1, day, hour, min, sec, ms);
   } else if (year < 0) {
-    SNPrintF(str, "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", -year, month + 1, day,
-             hour, min, sec, ms);
+    SNPrintF(ArrayVector(buffer), "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", -year,
+             month + 1, day, hour, min, sec, ms);
   } else {
-    SNPrintF(str, "+%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
-             hour, min, sec, ms);
+    SNPrintF(ArrayVector(buffer), "+%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", year,
+             month + 1, day, hour, min, sec, ms);
   }
-  return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+  return *isolate->factory()->NewStringFromAsciiChecked(buffer);
 }
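The three format branches correspond to the ISO 8601 expanded-years rule:

    new Date(Date.UTC(2016, 4, 1)).toISOString();  // '2016-05-01T00:00:00.000Z'
    new Date(Date.UTC(-1, 0, 1)).toISOString();    // '-000001-01-01T00:00:00.000Z'
    new Date(8.64e15).toISOString();               // '+275760-09-13T00:00:00.000Z'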
 
 
@@ -3599,8 +3808,8 @@
   HandleScope scope(isolate);
   CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
   char buffer[128];
-  Vector<char> str(buffer, arraysize(buffer));
-  ToDateString(date->value()->Number(), str, isolate->date_cache());
+  ToDateString(date->value()->Number(), ArrayVector(buffer),
+               isolate->date_cache());
   Handle<String> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
@@ -3614,8 +3823,8 @@
   HandleScope scope(isolate);
   CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
   char buffer[128];
-  Vector<char> str(buffer, arraysize(buffer));
-  ToDateString(date->value()->Number(), str, isolate->date_cache(), kTimeOnly);
+  ToDateString(date->value()->Number(), ArrayVector(buffer),
+               isolate->date_cache(), kTimeOnly);
   Handle<String> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
@@ -3633,14 +3842,14 @@
     return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
   }
   char buffer[128];
-  Vector<char> str(buffer, arraysize(buffer));
   int64_t time_ms = static_cast<int64_t>(time_val);
   int year, month, day, weekday, hour, min, sec, ms;
   isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
                                        &hour, &min, &sec, &ms);
-  SNPrintF(str, "%s, %02d %s %4d %02d:%02d:%02d GMT", kShortWeekDays[weekday],
-           day, kShortMonths[month], year, hour, min, sec);
-  return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+  SNPrintF(ArrayVector(buffer), "%s, %02d %s %4d %02d:%02d:%02d GMT",
+           kShortWeekDays[weekday], day, kShortMonths[month], year, hour, min,
+           sec);
+  return *isolate->factory()->NewStringFromAsciiChecked(buffer);
 }
 
 
@@ -3963,46 +4172,67 @@
       isolate, function,
       isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
 
-  // TODO(bmeurer): Optimize the rest for the common cases where {target} is
-  // a function with some initial map or even a bound function.
+  LookupIterator length_lookup(target, isolate->factory()->length_string(),
+                               target, LookupIterator::OWN);
   // Setup the "length" property based on the "length" of the {target}.
-  Handle<Object> length(Smi::FromInt(0), isolate);
-  Maybe<bool> target_has_length =
-      JSReceiver::HasOwnProperty(target, isolate->factory()->length_string());
-  if (!target_has_length.IsJust()) {
-    return isolate->heap()->exception();
-  } else if (target_has_length.FromJust()) {
-    Handle<Object> target_length;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, target_length,
-        JSReceiver::GetProperty(target, isolate->factory()->length_string()));
-    if (target_length->IsNumber()) {
-      length = isolate->factory()->NewNumber(std::max(
-          0.0, DoubleToInteger(target_length->Number()) - argv.length()));
+  // If the target's "length" property is the default JSFunction accessor, we
+  // can keep the accessor that is installed by default on the JSBoundFunction.
+  // It lazily computes the value from the underlying internal length.
+  if (!target->IsJSFunction() ||
+      length_lookup.state() != LookupIterator::ACCESSOR ||
+      !length_lookup.GetAccessors()->IsAccessorInfo()) {
+    Handle<Object> length(Smi::FromInt(0), isolate);
+    Maybe<PropertyAttributes> attributes =
+        JSReceiver::GetPropertyAttributes(&length_lookup);
+    if (!attributes.IsJust()) return isolate->heap()->exception();
+    if (attributes.FromJust() != ABSENT) {
+      Handle<Object> target_length;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_length,
+                                         Object::GetProperty(&length_lookup));
+      if (target_length->IsNumber()) {
+        length = isolate->factory()->NewNumber(std::max(
+            0.0, DoubleToInteger(target_length->Number()) - argv.length()));
+      }
     }
+    LookupIterator it(function, isolate->factory()->length_string(), function);
+    DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+    RETURN_FAILURE_ON_EXCEPTION(isolate,
+                                JSObject::DefineOwnPropertyIgnoreAttributes(
+                                    &it, length, it.property_attributes()));
   }
-  function->set_length(*length);
 
   // Setup the "name" property based on the "name" of the {target}.
-  Handle<Object> target_name;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, target_name,
-      JSReceiver::GetProperty(target, isolate->factory()->name_string()));
-  Handle<String> name;
-  if (!target_name->IsString()) {
-    name = isolate->factory()->bound__string();
-  } else {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, name, Name::ToFunctionName(Handle<String>::cast(target_name)));
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, name, isolate->factory()->NewConsString(
-                           isolate->factory()->bound__string(), name));
+  // If the target's "name" property is the default JSFunction accessor, we
+  // can keep the accessor that is installed by default on the JSBoundFunction.
+  // It lazily computes the value from the underlying internal name.
+  LookupIterator name_lookup(target, isolate->factory()->name_string(), target,
+                             LookupIterator::OWN);
+  if (!target->IsJSFunction() ||
+      name_lookup.state() != LookupIterator::ACCESSOR ||
+      !name_lookup.GetAccessors()->IsAccessorInfo()) {
+    Handle<Object> target_name;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_name,
+                                       Object::GetProperty(&name_lookup));
+    Handle<String> name;
+    if (target_name->IsString()) {
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, name,
+          Name::ToFunctionName(Handle<String>::cast(target_name)));
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, name, isolate->factory()->NewConsString(
+                             isolate->factory()->bound__string(), name));
+    } else {
+      name = isolate->factory()->bound__string();
+    }
+    LookupIterator it(function, isolate->factory()->name_string());
+    DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+    RETURN_FAILURE_ON_EXCEPTION(isolate,
+                                JSObject::DefineOwnPropertyIgnoreAttributes(
+                                    &it, name, it.property_attributes()));
   }
-  function->set_name(*name);
   return *function;
 }
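For reference, the common case the rewrite optimizes: a plain function target keeps the default lazy accessors, and the observable values are unchanged:

    function add(a, b, c) { return a + b + c; }
    const inc = add.bind(null, 1);
    inc.length;  // 2 -- target length minus the number of bound arguments
    inc.name;    // 'bound add'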
 
-
 // ES6 section 19.2.3.5 Function.prototype.toString ( )
 BUILTIN(FunctionPrototypeToString) {
   HandleScope scope(isolate);
@@ -4028,6 +4258,13 @@
   return *result;
 }
 
+BUILTIN(AsyncFunctionConstructor) {
+  HandleScope scope(isolate);
+  Handle<JSFunction> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, CreateDynamicFunction(isolate, args, "async function"));
+  return *result;
+}
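The async function constructor is only reachable indirectly; a usage sketch:

    const AsyncFunction = Object.getPrototypeOf(async function () {}).constructor;
    const f = new AsyncFunction('x', 'return x + 1;');
    f(41).then(v => console.log(v));  // 42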
 
 // ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
 BUILTIN(SymbolConstructor) {
@@ -4128,6 +4365,193 @@
   return *result;
 }
 
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* position = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  // Check that {receiver} is coercible to Object and convert it to a String.
+  receiver =
+      assembler->ToThisString(context, receiver, "String.prototype.charAt");
+
+  // Convert the {position} to a Smi and check that it's in bounds of the
+  // {receiver}.
+  // TODO(bmeurer): Find an abstraction for this!
+  {
+    // Check if the {position} is already a Smi.
+    Variable var_position(assembler, MachineRepresentation::kTagged);
+    var_position.Bind(position);
+    Label if_positionissmi(assembler),
+        if_positionisnotsmi(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
+                      &if_positionisnotsmi);
+    assembler->Bind(&if_positionisnotsmi);
+    {
+      // Convert the {position} to an Integer via the ToIntegerStub.
+      Callable callable = CodeFactory::ToInteger(assembler->isolate());
+      Node* index = assembler->CallStub(callable, context, position);
+
+      // Check if the resulting {index} is now a Smi.
+      Label if_indexissmi(assembler, Label::kDeferred),
+          if_indexisnotsmi(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
+                        &if_indexisnotsmi);
+
+      assembler->Bind(&if_indexissmi);
+      {
+        var_position.Bind(index);
+        assembler->Goto(&if_positionissmi);
+      }
+
+      assembler->Bind(&if_indexisnotsmi);
+      {
+        // The ToIntegerStub canonicalizes everything in Smi range to Smi
+        // representation, so any HeapNumber returned is not in Smi range.
+        // The only exception here is -0.0, which we treat as 0.
+        Node* index_value = assembler->LoadHeapNumberValue(index);
+        Label if_indexiszero(assembler, Label::kDeferred),
+            if_indexisnotzero(assembler, Label::kDeferred);
+        assembler->Branch(assembler->Float64Equal(
+                              index_value, assembler->Float64Constant(0.0)),
+                          &if_indexiszero, &if_indexisnotzero);
+
+        assembler->Bind(&if_indexiszero);
+        {
+          var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
+          assembler->Goto(&if_positionissmi);
+        }
+
+        assembler->Bind(&if_indexisnotzero);
+        {
+          // The {index} is some other integral Number that is definitely
+          // neither -0.0 nor in Smi range.
+          assembler->Return(assembler->EmptyStringConstant());
+        }
+      }
+    }
+    assembler->Bind(&if_positionissmi);
+    position = var_position.value();
+
+    // Determine the actual length of the {receiver} String.
+    Node* receiver_length =
+        assembler->LoadObjectField(receiver, String::kLengthOffset);
+
+    // Return "" if the Smi {position} is outside the bounds of the {receiver}.
+    Label if_positioninbounds(assembler),
+        if_positionnotinbounds(assembler, Label::kDeferred);
+    assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
+                      &if_positionnotinbounds, &if_positioninbounds);
+    assembler->Bind(&if_positionnotinbounds);
+    assembler->Return(assembler->EmptyStringConstant());
+    assembler->Bind(&if_positioninbounds);
+  }
+
+  // Load the character code at the {position} from the {receiver}.
+  Node* code = assembler->StringCharCodeAt(receiver, position);
+
+  // And return the single character string with only that {code}.
+  Node* result = assembler->StringFromCharCode(code);
+  assembler->Return(result);
+}
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+void Builtins::Generate_StringPrototypeCharCodeAt(
+    CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* receiver = assembler->Parameter(0);
+  Node* position = assembler->Parameter(1);
+  Node* context = assembler->Parameter(4);
+
+  // Check that {receiver} is coercible to Object and convert it to a String.
+  receiver =
+      assembler->ToThisString(context, receiver, "String.prototype.charCodeAt");
+
+  // Convert the {position} to a Smi and check that it's in bounds of the
+  // {receiver}.
+  // TODO(bmeurer): Find an abstraction for this!
+  {
+    // Check if the {position} is already a Smi.
+    Variable var_position(assembler, MachineRepresentation::kTagged);
+    var_position.Bind(position);
+    Label if_positionissmi(assembler),
+        if_positionisnotsmi(assembler, Label::kDeferred);
+    assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
+                      &if_positionisnotsmi);
+    assembler->Bind(&if_positionisnotsmi);
+    {
+      // Convert the {position} to an Integer via the ToIntegerStub.
+      Callable callable = CodeFactory::ToInteger(assembler->isolate());
+      Node* index = assembler->CallStub(callable, context, position);
+
+      // Check if the resulting {index} is now a Smi.
+      Label if_indexissmi(assembler, Label::kDeferred),
+          if_indexisnotsmi(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
+                        &if_indexisnotsmi);
+
+      assembler->Bind(&if_indexissmi);
+      {
+        var_position.Bind(index);
+        assembler->Goto(&if_positionissmi);
+      }
+
+      assembler->Bind(&if_indexisnotsmi);
+      {
+        // The ToIntegerStub canonicalizes everything in Smi range to Smi
+        // representation, so any HeapNumber returned is not in Smi range.
+        // The only exception here is -0.0, which we treat as 0.
+        Node* index_value = assembler->LoadHeapNumberValue(index);
+        Label if_indexiszero(assembler, Label::kDeferred),
+            if_indexisnotzero(assembler, Label::kDeferred);
+        assembler->Branch(assembler->Float64Equal(
+                              index_value, assembler->Float64Constant(0.0)),
+                          &if_indexiszero, &if_indexisnotzero);
+
+        assembler->Bind(&if_indexiszero);
+        {
+          var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
+          assembler->Goto(&if_positionissmi);
+        }
+
+        assembler->Bind(&if_indexisnotzero);
+        {
+        // The {index} is some other integral Number that is definitely
+        // neither -0.0 nor in Smi range.
+          assembler->Return(assembler->NaNConstant());
+        }
+      }
+    }
+    assembler->Bind(&if_positionissmi);
+    position = var_position.value();
+
+    // Determine the actual length of the {receiver} String.
+    Node* receiver_length =
+        assembler->LoadObjectField(receiver, String::kLengthOffset);
+
+    // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
+    Label if_positioninbounds(assembler),
+        if_positionnotinbounds(assembler, Label::kDeferred);
+    assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
+                      &if_positionnotinbounds, &if_positioninbounds);
+    assembler->Bind(&if_positionnotinbounds);
+    assembler->Return(assembler->NaNConstant());
+    assembler->Bind(&if_positioninbounds);
+  }
+
+  // Load the character at the {position} from the {receiver}.
+  Node* value = assembler->StringCharCodeAt(receiver, position);
+  Node* result = assembler->SmiFromWord32(value);
+  assembler->Return(result);
+}
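The edge cases handled above, side by side for the two builtins:

    'abc'.charAt(1.7);    // 'b'  -- position goes through ToInteger
    'abc'.charAt(-0);     // 'a'  -- -0.0 is treated as index 0
    'abc'.charAt(5);      // ''   -- out of bounds yields the empty string
    'abc'.charCodeAt(5);  // NaN  -- charCodeAt yields NaN instead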
+
 // -----------------------------------------------------------------------------
 // ES6 section 24.1 ArrayBuffer Objects
 
@@ -4237,11 +4661,13 @@
 
 namespace {
 
-template <bool is_construct>
 MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
-    Isolate* isolate, BuiltinArguments<BuiltinExtraArguments::kTarget> args) {
+    Isolate* isolate,
+    BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget> args) {
   HandleScope scope(isolate);
   Handle<HeapObject> function = args.target<HeapObject>();
+  Handle<HeapObject> new_target = args.new_target();
+  bool is_construct = !new_target->IsUndefined();
   Handle<JSReceiver> receiver;
 
   DCHECK(function->IsFunctionTemplateInfo() ||
@@ -4261,9 +4687,11 @@
     }
     Handle<ObjectTemplateInfo> instance_template(
         ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
-                               ApiNatives::InstantiateObject(instance_template),
-                               Object);
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, receiver,
+        ApiNatives::InstantiateObject(instance_template,
+                                      Handle<JSReceiver>::cast(new_target)),
+        Object);
     args[0] = *receiver;
     DCHECK_EQ(*receiver, *args.receiver());
   } else {
@@ -4301,13 +4729,9 @@
     LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
     DCHECK(raw_holder->IsJSObject());
 
-    FunctionCallbackArguments custom(isolate,
-                                     data_obj,
-                                     *function,
-                                     raw_holder,
-                                     &args[0] - 1,
-                                     args.length() - 1,
-                                     is_construct);
+    FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
+                                     *new_target, &args[0] - 1,
+                                     args.length() - 1);
 
     Handle<Object> result = custom.Call(callback);
     if (result.is_null()) result = isolate->factory()->undefined_value();
@@ -4328,19 +4752,11 @@
   HandleScope scope(isolate);
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     HandleApiCallHelper<false>(isolate, args));
+                                     HandleApiCallHelper(isolate, args));
   return *result;
 }
 
 
-BUILTIN(HandleApiCallConstruct) {
-  HandleScope scope(isolate);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     HandleApiCallHelper<true>(isolate, args));
-  return *result;
-}
-
 Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
                                     TailCallMode tail_call_mode) {
   switch (tail_call_mode) {
@@ -4422,11 +4838,12 @@
 namespace {
 
 class RelocatableArguments
-    : public BuiltinArguments<BuiltinExtraArguments::kTarget>,
+    : public BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>,
       public Relocatable {
  public:
   RelocatableArguments(Isolate* isolate, int length, Object** arguments)
-      : BuiltinArguments<BuiltinExtraArguments::kTarget>(length, arguments),
+      : BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>(length,
+                                                                     arguments),
         Relocatable(isolate) {}
 
   virtual inline void IterateInstance(ObjectVisitor* v) {
@@ -4450,32 +4867,31 @@
     DCHECK(function->IsFunctionTemplateInfo() || function->IsJSFunction());
     if (function->IsFunctionTemplateInfo() ||
         is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
-      if (receiver->IsUndefined() || receiver->IsNull()) {
-        receiver = handle(isolate->global_proxy(), isolate);
-      } else {
-        ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
-                                   Object::ToObject(isolate, receiver), Object);
-      }
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+                                 Object::ConvertReceiver(isolate, receiver),
+                                 Object);
     }
   }
-  // Construct BuiltinArguments object: function, arguments reversed, receiver.
+  // Construct BuiltinArguments object:
+  // new target, function, arguments reversed, receiver.
   const int kBufferSize = 32;
   Object* small_argv[kBufferSize];
   Object** argv;
-  if (argc + 2 <= kBufferSize) {
+  if (argc + 3 <= kBufferSize) {
     argv = small_argv;
   } else {
-    argv = new Object* [argc + 2];
+    argv = new Object*[argc + 3];
   }
-  argv[argc + 1] = *receiver;
+  argv[argc + 2] = *receiver;
   for (int i = 0; i < argc; ++i) {
-    argv[argc - i] = *args[i];
+    argv[argc - i + 1] = *args[i];
   }
-  argv[0] = *function;
+  argv[1] = *function;
+  argv[0] = isolate->heap()->undefined_value();  // new target
   MaybeHandle<Object> result;
   {
-    RelocatableArguments arguments(isolate, argc + 2, &argv[argc + 1]);
-    result = HandleApiCallHelper<false>(isolate, arguments);
+    RelocatableArguments arguments(isolate, argc + 3, &argv[argc] + 2);
+    result = HandleApiCallHelper(isolate, arguments);
   }
   if (argv != small_argv) {
     delete[] argv;
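As a worked illustration of the layout above, for argc == 2: argv[0] holds the undefined new target, argv[1] the function, argv[2] and argv[3] the arguments in reverse order (args[1], then args[0]), and argv[4] the receiver, so &argv[argc] + 2 (i.e. &argv[argc + 2]) points at the receiver slot that BuiltinArguments expects.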
@@ -4495,6 +4911,18 @@
   // Get the object called.
   JSObject* obj = JSObject::cast(*receiver);
 
+  // Set the new target.
+  HeapObject* new_target;
+  if (is_construct_call) {
+    // TODO(adamk): This should be passed through in args instead of
+    // being patched in here. We need to set a non-undefined value
+    // for v8::FunctionCallbackInfo::IsConstructCall() to get the
+    // right answer.
+    new_target = obj;
+  } else {
+    new_target = isolate->heap()->undefined_value();
+  }
+
   // Get the invocation callback from the function descriptor that was
   // used to create the called object.
   DCHECK(obj->map()->is_callable());
@@ -4517,13 +4945,9 @@
     HandleScope scope(isolate);
     LOG(isolate, ApiObjectAccess("call non-function", obj));
 
-    FunctionCallbackArguments custom(isolate,
-                                     call_data->data(),
-                                     constructor,
-                                     obj,
-                                     &args[0] - 1,
-                                     args.length() - 1,
-                                     is_construct_call);
+    FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
+                                     obj, new_target, &args[0] - 1,
+                                     args.length() - 1);
     Handle<Object> result_handle = custom.Call(callback);
     if (result_handle.is_null()) {
       result = isolate->heap()->undefined_value();
@@ -4610,6 +5034,14 @@
   NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
 }
 
+static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
+  StoreIC::GenerateMegamorphic(masm);
+}
+
+static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
+  StoreIC::GenerateMegamorphic(masm);
+}
+
 
 static void Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
   KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
@@ -4626,26 +5058,6 @@
 }
 
 
-static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
-  KeyedStoreIC::GeneratePreMonomorphic(masm);
-}
-
-
-static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
-  KeyedStoreIC::GeneratePreMonomorphic(masm);
-}
-
-
 static void Generate_Return_DebugBreak(MacroAssembler* masm) {
   DebugCodegen::GenerateDebugBreakStub(masm,
                                        DebugCodegen::SAVE_RESULT_REGISTER);
@@ -4749,11 +5161,10 @@
 Handle<Code> CodeStubAssemblerBuilder(Isolate* isolate,
                                       BuiltinDesc const* builtin_desc) {
   Zone zone(isolate->allocator());
-  compiler::CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
-                                        builtin_desc->flags,
-                                        builtin_desc->s_name);
+  CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
+                              builtin_desc->flags, builtin_desc->s_name);
   // Generate the code/adaptor.
-  typedef void (*Generator)(compiler::CodeStubAssembler*);
+  typedef void (*Generator)(CodeStubAssembler*);
   Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
   g(&assembler);
   return assembler.GenerateCode();
@@ -4907,6 +5318,242 @@
   masm->TailCallRuntime(Runtime::kStackGuard);
 }
 
+namespace {
+
+void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
+                              compiler::Node* context,
+                              compiler::Node** out_instance_type,
+                              compiler::Node** out_backing_store) {
+  using namespace compiler;
+  CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
+      not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
+      not_float_or_clamped(a), invalid(a);
+
+  // Fail if it is not a heap object.
+  a->Branch(a->WordIsSmi(tagged), &is_smi, &not_smi);
+  a->Bind(&is_smi);
+  a->Goto(&invalid);
+
+  // Fail if the array's instance type is not JSTypedArray.
+  a->Bind(&not_smi);
+  a->Branch(a->WordEqual(a->LoadInstanceType(tagged),
+                         a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+            &is_typed_array, &not_typed_array);
+  a->Bind(&not_typed_array);
+  a->Goto(&invalid);
+
+  // Fail if the array's JSArrayBuffer is not shared.
+  a->Bind(&is_typed_array);
+  Node* array_buffer = a->LoadObjectField(tagged, JSTypedArray::kBufferOffset);
+  Node* is_buffer_shared = a->BitFieldDecode<JSArrayBuffer::IsShared>(
+      a->LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldSlot));
+  a->Branch(is_buffer_shared, &is_shared, &not_shared);
+  a->Bind(&not_shared);
+  a->Goto(&invalid);
+
+  // Fail if the array's element type is float32, float64 or clamped.
+  a->Bind(&is_shared);
+  Node* elements_instance_type = a->LoadInstanceType(
+      a->LoadObjectField(tagged, JSObject::kElementsOffset));
+  STATIC_ASSERT(FIXED_INT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+  a->Branch(a->Int32LessThan(elements_instance_type,
+                             a->Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
+            &not_float_or_clamped, &is_float_or_clamped);
+  a->Bind(&is_float_or_clamped);
+  a->Goto(&invalid);
+
+  a->Bind(&invalid);
+  a->CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
+                 tagged);
+  a->Return(a->UndefinedConstant());
+
+  a->Bind(&not_float_or_clamped);
+  *out_instance_type = elements_instance_type;
+
+  Node* backing_store =
+      a->LoadObjectField(array_buffer, JSArrayBuffer::kBackingStoreOffset);
+  Node* byte_offset = a->ChangeUint32ToWord(a->TruncateTaggedToWord32(
+      context,
+      a->LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset)));
+  *out_backing_store = a->IntPtrAdd(backing_store, byte_offset);
+}
+
+// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
+compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
+                                                 compiler::Node* tagged,
+                                                 compiler::Node* context) {
+  using namespace compiler;
+  CodeStubAssembler::Variable var_result(a, MachineRepresentation::kWord32);
+
+  Callable to_number = CodeFactory::ToNumber(a->isolate());
+  Node* number_index = a->CallStub(to_number, context, tagged);
+  CodeStubAssembler::Label done(a, &var_result);
+
+  CodeStubAssembler::Label if_numberissmi(a), if_numberisnotsmi(a);
+  a->Branch(a->WordIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);
+
+  a->Bind(&if_numberissmi);
+  {
+    var_result.Bind(a->SmiToWord32(number_index));
+    a->Goto(&done);
+  }
+
+  a->Bind(&if_numberisnotsmi);
+  {
+    Node* number_index_value = a->LoadHeapNumberValue(number_index);
+    Node* access_index = a->TruncateFloat64ToWord32(number_index_value);
+    Node* test_index = a->ChangeInt32ToFloat64(access_index);
+
+    CodeStubAssembler::Label if_indexesareequal(a), if_indexesarenotequal(a);
+    a->Branch(a->Float64Equal(number_index_value, test_index),
+              &if_indexesareequal, &if_indexesarenotequal);
+
+    a->Bind(&if_indexesareequal);
+    {
+      var_result.Bind(access_index);
+      a->Goto(&done);
+    }
+
+    a->Bind(&if_indexesarenotequal);
+    a->Return(
+        a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+  }
+
+  a->Bind(&done);
+  return var_result.value();
+}
+
+void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
+                         compiler::Node* array_length_word,
+                         compiler::Node* context) {
+  using namespace compiler;
+  // Check if the index is in bounds. If not, throw RangeError.
+  CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
+  a->Branch(
+      a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
+                a->Int32GreaterThanOrEqual(index_word, array_length_word)),
+      &if_notinbounds, &if_inbounds);
+  a->Bind(&if_notinbounds);
+  a->Return(
+      a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+  a->Bind(&if_inbounds);
+}
+
+}  // anonymous namespace
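The validation helpers above reject, in order: non-typed-array receivers, non-shared buffers, float/clamped element kinds, and out-of-range indices. Observable behavior under the shared-memory draft this patch tracks (illustrative JavaScript):

    const ta = new Int32Array(new SharedArrayBuffer(8));  // two elements
    Atomics.load(ta, 2);                  // RangeError: index out of bounds
    Atomics.load(new Float64Array(new SharedArrayBuffer(8)), 0);
                                          // TypeError: float elements
    Atomics.load(new Int32Array(2), 0);   // TypeError: buffer is not shared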
+
+void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
+  using namespace compiler;
+  Node* array = a->Parameter(1);
+  Node* index = a->Parameter(2);
+  Node* context = a->Parameter(3 + 2);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+
+  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
+  Node* array_length_word32 = a->TruncateTaggedToWord32(
+      context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
+  ValidateAtomicIndex(a, index_word32, array_length_word32, context);
+  Node* index_word = a->ChangeUint32ToWord(index_word32);
+
+  CodeStubAssembler::Label i8(a), u8(a), i16(a), u16(a), i32(a), u32(a),
+      other(a);
+  int32_t case_values[] = {
+      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
+      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+  };
+  CodeStubAssembler::Label* case_labels[] = {
+      &i8, &u8, &i16, &u16, &i32, &u32,
+  };
+  a->Switch(instance_type, &other, case_values, case_labels,
+            arraysize(case_labels));
+
+  a->Bind(&i8);
+  a->Return(
+      a->SmiTag(a->AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+
+  a->Bind(&u8);
+  a->Return(a->SmiTag(
+      a->AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
+
+  a->Bind(&i16);
+  a->Return(a->SmiTag(a->AtomicLoad(MachineType::Int16(), backing_store,
+                                    a->WordShl(index_word, 1))));
+
+  a->Bind(&u16);
+  a->Return(a->SmiTag(a->AtomicLoad(MachineType::Uint16(), backing_store,
+                                    a->WordShl(index_word, 1))));
+
+  a->Bind(&i32);
+  a->Return(a->ChangeInt32ToTagged(a->AtomicLoad(
+      MachineType::Int32(), backing_store, a->WordShl(index_word, 2))));
+
+  a->Bind(&u32);
+  a->Return(a->ChangeUint32ToTagged(a->AtomicLoad(
+      MachineType::Uint32(), backing_store, a->WordShl(index_word, 2))));
+
+  // This shouldn't happen; the type was already validated above.
+  a->Bind(&other);
+  a->Return(a->Int32Constant(0));
+}
+
+void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
+  using namespace compiler;
+  Node* array = a->Parameter(1);
+  Node* index = a->Parameter(2);
+  Node* value = a->Parameter(3);
+  Node* context = a->Parameter(4 + 2);
+
+  Node* instance_type;
+  Node* backing_store;
+  ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+
+  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
+  Node* array_length_word32 = a->TruncateTaggedToWord32(
+      context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
+  ValidateAtomicIndex(a, index_word32, array_length_word32, context);
+  Node* index_word = a->ChangeUint32ToWord(index_word32);
+
+  Callable to_integer = CodeFactory::ToInteger(a->isolate());
+  Node* value_integer = a->CallStub(to_integer, context, value);
+  Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
+
+  CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
+  int32_t case_values[] = {
+      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
+      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+  };
+  CodeStubAssembler::Label* case_labels[] = {
+      &u8, &u8, &u16, &u16, &u32, &u32,
+  };
+  a->Switch(instance_type, &other, case_values, case_labels,
+            arraysize(case_labels));
+
+  a->Bind(&u8);
+  a->AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
+                 value_word32);
+  a->Return(value_integer);
+
+  a->Bind(&u16);
+  a->AtomicStore(MachineRepresentation::kWord16, backing_store,
+                 a->WordShl(index_word, 1), value_word32);
+  a->Return(value_integer);
+
+  a->Bind(&u32);
+  a->AtomicStore(MachineRepresentation::kWord32, backing_store,
+                 a->WordShl(index_word, 2), value_word32);
+  a->Return(value_integer);
+
+  // This shouldn't happen; the type was already validated above.
+  a->Bind(&other);
+  a->Return(a->Int32Constant(0));
+}
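Round trip through the two stubs; note that Atomics.store returns value_integer, i.e. the stored value after ToInteger:

    const ta = new Int32Array(new SharedArrayBuffer(16));
    Atomics.store(ta, 0, 6.7);  // returns 6 -- the value after ToInteger
    Atomics.load(ta, 0);        // 6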
 
 #define DEFINE_BUILTIN_ACCESSOR_C(name, ignore)               \
 Handle<Code> Builtins::name() {                               \
diff --git a/src/builtins.h b/src/builtins.h
index 221d06f..ff1d77d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -11,13 +11,9 @@
 namespace v8 {
 namespace internal {
 
-namespace compiler {
-
 // Forward declarations.
 class CodeStubAssembler;
 
-}  // namespace compiler
-
 // Specifies extra arguments required by a C++ builtin.
 enum class BuiltinExtraArguments : uint8_t {
   kNone = 0u,
@@ -64,7 +60,6 @@
   V(EmptyFunction, kNone)                                      \
                                                                \
   V(ArrayConcat, kNone)                                        \
-  V(ArrayIsArray, kNone)                                       \
   V(ArrayPop, kNone)                                           \
   V(ArrayPush, kNone)                                          \
   V(ArrayShift, kNone)                                         \
@@ -119,6 +114,10 @@
   V(FunctionPrototypeToString, kNone)                          \
                                                                \
   V(GeneratorFunctionConstructor, kTargetAndNewTarget)         \
+  V(AsyncFunctionConstructor, kTargetAndNewTarget)             \
+                                                               \
+  V(GlobalEncodeURI, kNone)                                    \
+  V(GlobalEncodeURIComponent, kNone)                           \
                                                                \
   V(GlobalEval, kTarget)                                       \
                                                                \
@@ -130,21 +129,28 @@
                                                                \
   V(ObjectAssign, kNone)                                       \
   V(ObjectCreate, kNone)                                       \
+  V(ObjectDefineGetter, kNone)                                 \
+  V(ObjectDefineProperties, kNone)                             \
+  V(ObjectDefineProperty, kNone)                               \
+  V(ObjectDefineSetter, kNone)                                 \
+  V(ObjectEntries, kNone)                                      \
   V(ObjectFreeze, kNone)                                       \
   V(ObjectGetOwnPropertyDescriptor, kNone)                     \
+  V(ObjectGetOwnPropertyDescriptors, kNone)                    \
   V(ObjectGetOwnPropertyNames, kNone)                          \
   V(ObjectGetOwnPropertySymbols, kNone)                        \
+  V(ObjectGetPrototypeOf, kNone)                               \
   V(ObjectIs, kNone)                                           \
   V(ObjectIsExtensible, kNone)                                 \
   V(ObjectIsFrozen, kNone)                                     \
   V(ObjectIsSealed, kNone)                                     \
   V(ObjectKeys, kNone)                                         \
-  V(ObjectValues, kNone)                                       \
-  V(ObjectEntries, kNone)                                      \
-  V(ObjectGetOwnPropertyDescriptors, kNone)                    \
+  V(ObjectLookupGetter, kNone)                                 \
+  V(ObjectLookupSetter, kNone)                                 \
   V(ObjectPreventExtensions, kNone)                            \
-  V(ObjectSeal, kNone)                                         \
   V(ObjectProtoToString, kNone)                                \
+  V(ObjectSeal, kNone)                                         \
+  V(ObjectValues, kNone)                                       \
                                                                \
   V(ProxyConstructor, kNone)                                   \
   V(ProxyConstructor_ConstructStub, kTarget)                   \
@@ -166,8 +172,7 @@
   V(SymbolConstructor, kNone)                                  \
   V(SymbolConstructor_ConstructStub, kTarget)                  \
                                                                \
-  V(HandleApiCall, kTarget)                                    \
-  V(HandleApiCallConstruct, kTarget)                           \
+  V(HandleApiCall, kTargetAndNewTarget)                        \
   V(HandleApiCallAsFunction, kNone)                            \
   V(HandleApiCallAsConstructor, kNone)                         \
                                                                \
@@ -176,6 +181,9 @@
 
 // Define list of builtins implemented in assembly.
 #define BUILTIN_LIST_A(V)                                                      \
+  V(AllocateInNewSpace, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
+  V(AllocateInOldSpace, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
+                                                                               \
   V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
                                                                                \
   V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState)      \
@@ -219,7 +227,9 @@
   V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
   V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
   V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
+  V(ResumeGeneratorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)        \
   V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState)                      \
+  V(CompileBaseline, BUILTIN, UNINITIALIZED, kNoExtraICState)                  \
   V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)                 \
   V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
   V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
@@ -229,13 +239,9 @@
   V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState)     \
                                                                                \
   V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
-  V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)        \
   V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
   V(InterpreterPushArgsAndTailCall, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
   V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState)  \
-  V(InterpreterNotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)     \
-  V(InterpreterNotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
-  V(InterpreterNotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
   V(InterpreterEnterBytecodeDispatch, BUILTIN, UNINITIALIZED, kNoExtraICState) \
                                                                                \
   V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                      \
@@ -248,15 +254,11 @@
   V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC,                            \
     StoreICState::kStrictModeState)                                            \
                                                                                \
-  V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState)   \
-  V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC,               \
-    kNoExtraICState)                                                           \
-  V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState)    \
+  V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, kNoExtraICState)               \
+  V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC,                         \
+    StoreICState::kStrictModeState)                                            \
                                                                                \
-  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,             \
-    StoreICState::kStrictModeState)                                            \
-  V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC,        \
-    StoreICState::kStrictModeState)                                            \
+  V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState)    \
   V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC,              \
     StoreICState::kStrictModeState)                                            \
                                                                                \
@@ -279,7 +281,6 @@
   V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState)         \
   V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
                                                                                \
-  V(FunctionHasInstance, BUILTIN, UNINITIALIZED, kNoExtraICState)              \
   V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
   V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
                                                                                \
@@ -308,14 +309,23 @@
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
 
 // Define list of builtins implemented in TurboFan (with JS linkage).
-#define BUILTIN_LIST_T(V) \
-  V(MathCeil, 2)          \
-  V(MathClz32, 2)         \
-  V(MathFloor, 2)         \
-  V(MathRound, 2)         \
-  V(MathSqrt, 2)          \
-  V(MathTrunc, 2)         \
-  V(ObjectHasOwnProperty, 2)
+#define BUILTIN_LIST_T(V)            \
+  V(FunctionPrototypeHasInstance, 2) \
+  V(GeneratorPrototypeNext, 2)       \
+  V(GeneratorPrototypeReturn, 2)     \
+  V(GeneratorPrototypeThrow, 2)      \
+  V(MathCeil, 2)                     \
+  V(MathClz32, 2)                    \
+  V(MathFloor, 2)                    \
+  V(MathRound, 2)                    \
+  V(MathSqrt, 2)                     \
+  V(MathTrunc, 2)                    \
+  V(ObjectHasOwnProperty, 2)         \
+  V(ArrayIsArray, 2)                 \
+  V(StringPrototypeCharAt, 2)        \
+  V(StringPrototypeCharCodeAt, 2)    \
+  V(AtomicsLoad, 3)                  \
+  V(AtomicsStore, 4)
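
BUILTIN_LIST_T is an X-macro: each caller passes its own V to stamp out one declaration or table entry per (name, argc) pair. A sketch of one plausible expansion site; DECLARE_TF is hypothetical and only illustrates the mechanism:

    #define DECLARE_TF(Name, Argc) \
      static void Generate_##Name(CodeStubAssembler* assembler);
    BUILTIN_LIST_T(DECLARE_TF)  // Generate_FunctionPrototypeHasInstance, ...
    #undef DECLARE_TF
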
 
 // Define list of builtin handlers implemented in assembly.
 #define BUILTIN_LIST_H(V)                    \
@@ -439,8 +449,11 @@
   static void Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
                                BuiltinExtraArguments extra_args);
+  static void Generate_AllocateInNewSpace(MacroAssembler* masm);
+  static void Generate_AllocateInOldSpace(MacroAssembler* masm);
   static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
   static void Generate_CompileLazy(MacroAssembler* masm);
+  static void Generate_CompileBaseline(MacroAssembler* masm);
   static void Generate_InOptimizationQueue(MacroAssembler* masm);
   static void Generate_CompileOptimized(MacroAssembler* masm);
   static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
@@ -450,6 +463,7 @@
   static void Generate_JSConstructStubApi(MacroAssembler* masm);
   static void Generate_JSEntryTrampoline(MacroAssembler* masm);
   static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
+  static void Generate_ResumeGeneratorTrampoline(MacroAssembler* masm);
   static void Generate_NotifyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
@@ -578,7 +592,6 @@
   // ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( )
   static void Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm);
 
-  static void Generate_FunctionHasInstance(MacroAssembler* masm);
   static void Generate_FunctionPrototypeApply(MacroAssembler* masm);
   static void Generate_FunctionPrototypeCall(MacroAssembler* masm);
 
@@ -589,11 +602,11 @@
   static void Generate_ArrayCode(MacroAssembler* masm);
 
   // ES6 section 20.2.2.10 Math.ceil ( x )
-  static void Generate_MathCeil(compiler::CodeStubAssembler* assembler);
+  static void Generate_MathCeil(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.11 Math.clz32 ( x )
-  static void Generate_MathClz32(compiler::CodeStubAssembler* assembler);
+  static void Generate_MathClz32(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.16 Math.floor ( x )
-  static void Generate_MathFloor(compiler::CodeStubAssembler* assembler);
+  static void Generate_MathFloor(CodeStubAssembler* assembler);
   enum class MathMaxMinKind { kMax, kMin };
   static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
   // ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
@@ -605,20 +618,38 @@
     Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
   }
   // ES6 section 20.2.2.28 Math.round ( x )
-  static void Generate_MathRound(compiler::CodeStubAssembler* assembler);
+  static void Generate_MathRound(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.32 Math.sqrt ( x )
-  static void Generate_MathSqrt(compiler::CodeStubAssembler* assembler);
+  static void Generate_MathSqrt(CodeStubAssembler* assembler);
   // ES6 section 20.2.2.35 Math.trunc ( x )
-  static void Generate_MathTrunc(compiler::CodeStubAssembler* assembler);
+  static void Generate_MathTrunc(CodeStubAssembler* assembler);
 
   // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
   static void Generate_NumberConstructor(MacroAssembler* masm);
   // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
   static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
 
+  // ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
+  static void Generate_FunctionPrototypeHasInstance(
+      CodeStubAssembler* assembler);
+
+  // ES6 section 25.3.1.2 Generator.prototype.next ( value )
+  static void Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler);
+  // ES6 section 25.3.1.3 Generator.prototype.return ( value )
+  static void Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler);
+  // ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
+  static void Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler);
+
   // ES6 section 19.1.3.2 Object.prototype.hasOwnProperty
-  static void Generate_ObjectHasOwnProperty(
-      compiler::CodeStubAssembler* assembler);
+  static void Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler);
+
+  // ES6 section 22.1.2.2 Array.isArray
+  static void Generate_ArrayIsArray(CodeStubAssembler* assembler);
+
+  // ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+  static void Generate_StringPrototypeCharAt(CodeStubAssembler* assembler);
+  // ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+  static void Generate_StringPrototypeCharCodeAt(CodeStubAssembler* assembler);
 
   static void Generate_StringConstructor(MacroAssembler* masm);
   static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
@@ -627,7 +658,7 @@
   static void Generate_StackCheck(MacroAssembler* masm);
 
   static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
-  static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
+  static void Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm);
   static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
     return Generate_InterpreterPushArgsAndCallImpl(masm,
                                                    TailCallMode::kDisallow);
@@ -638,10 +669,6 @@
   static void Generate_InterpreterPushArgsAndCallImpl(
       MacroAssembler* masm, TailCallMode tail_call_mode);
   static void Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm);
-  static void Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm);
-  static void Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm);
-  static void Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm);
-  static void Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm);
 
 #define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C)                \
   static void Generate_Make##C##CodeYoungAgainEvenMarking(   \
@@ -655,6 +682,9 @@
   static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
   static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
 
+  static void Generate_AtomicsLoad(CodeStubAssembler* assembler);
+  static void Generate_AtomicsStore(CodeStubAssembler* assembler);
+
   static void InitBuiltinFunctionTable();
 
   bool initialized_;
diff --git a/src/cancelable-task.cc b/src/cancelable-task.cc
index d231bb7..defbb44 100644
--- a/src/cancelable-task.cc
+++ b/src/cancelable-task.cc
@@ -14,7 +14,6 @@
 Cancelable::Cancelable(CancelableTaskManager* parent)
     : parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
   id_ = parent->Register(this);
-  CHECK(id_ != 0);
 }
 
 
@@ -27,49 +26,35 @@
   }
 }
 
-
-static bool ComparePointers(void* ptr1, void* ptr2) { return ptr1 == ptr2; }
-
-
-CancelableTaskManager::CancelableTaskManager()
-    : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
-
+CancelableTaskManager::CancelableTaskManager() : task_id_counter_(0) {}
 
 uint32_t CancelableTaskManager::Register(Cancelable* task) {
   base::LockGuard<base::Mutex> guard(&mutex_);
   uint32_t id = ++task_id_counter_;
   // The loop below is just used when task_id_counter_ overflows.
-  while ((id == 0) || (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id),
-                                                id) != nullptr)) {
-    ++id;
-  }
-  HashMap::Entry* entry =
-      cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
-  entry->value = task;
+  while (cancelable_tasks_.count(id) > 0) ++id;
+  cancelable_tasks_[id] = task;
   return id;
 }
 
 
 void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
   base::LockGuard<base::Mutex> guard(&mutex_);
-  void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+  size_t removed = cancelable_tasks_.erase(id);
   USE(removed);
-  DCHECK(removed != nullptr);
+  DCHECK_NE(0, removed);
   cancelable_tasks_barrier_.NotifyOne();
 }
 
 
 bool CancelableTaskManager::TryAbort(uint32_t id) {
   base::LockGuard<base::Mutex> guard(&mutex_);
-  HashMap::Entry* entry =
-      cancelable_tasks_.Lookup(reinterpret_cast<void*>(id), id);
-  if (entry != nullptr) {
-    Cancelable* value = reinterpret_cast<Cancelable*>(entry->value);
+  auto entry = cancelable_tasks_.find(id);
+  if (entry != cancelable_tasks_.end()) {
+    Cancelable* value = entry->second;
     if (value->Cancel()) {
       // Cannot call RemoveFinishedTask here because of recursive locking.
-      void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
-      USE(removed);
-      DCHECK(removed != nullptr);
+      cancelable_tasks_.erase(entry);
       cancelable_tasks_barrier_.NotifyOne();
       return true;
     }
@@ -85,27 +70,19 @@
   // started.
   base::LockGuard<base::Mutex> guard(&mutex_);
 
-  // HashMap does not support removing while iterating, hence keep a set of
-  // entries that are to be removed.
-  std::set<uint32_t> to_remove;
-
-  // Cancelable tasks could potentially register new tasks, requiring a loop
-  // here.
-  while (cancelable_tasks_.occupancy() > 0) {
-    for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
-         p = cancelable_tasks_.Next(p)) {
-      if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
-        to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
+  // Cancelable tasks could be running or could potentially register new
+  // tasks, requiring a loop here.
+  while (!cancelable_tasks_.empty()) {
+    for (auto it = cancelable_tasks_.begin(); it != cancelable_tasks_.end();) {
+      auto current = it;
+      // We need to get to the next element before erasing the current one.
+      ++it;
+      if (current->second->Cancel()) {
+        cancelable_tasks_.erase(current);
       }
     }
-    // Remove tasks that were successfully canceled.
-    for (auto id : to_remove) {
-      cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
-    }
-    to_remove.clear();
-
-    // Finally, wait for already running background tasks.
-    if (cancelable_tasks_.occupancy() > 0) {
+    // Wait for already running background tasks.
+    if (!cancelable_tasks_.empty()) {
       cancelable_tasks_barrier_.Wait(&mutex_);
     }
   }
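
The rewritten loop leans on std::map's iterator guarantee: erase() invalidates only iterators to the erased element, so advancing `it` before erasing `current` is safe even while entries are removed mid-walk. A self-contained sketch of the idiom:

    #include <cstdint>
    #include <map>

    void EraseCancelled(std::map<uint32_t, bool>& tasks) {
      for (auto it = tasks.begin(); it != tasks.end();) {
        auto current = it++;  // advance first; `current` remains valid
        if (current->second) tasks.erase(current);  // only `current` is invalidated
      }
    }
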
diff --git a/src/cancelable-task.h b/src/cancelable-task.h
index a8387fc..b1d62aa 100644
--- a/src/cancelable-task.h
+++ b/src/cancelable-task.h
@@ -5,11 +5,12 @@
 #ifndef V8_CANCELABLE_TASK_H_
 #define V8_CANCELABLE_TASK_H_
 
+#include <map>
+
 #include "include/v8-platform.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/macros.h"
 #include "src/base/platform/condition-variable.h"
-#include "src/hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -51,7 +52,7 @@
   uint32_t task_id_counter_;
 
   // A set of cancelable tasks that are currently registered.
-  HashMap cancelable_tasks_;
+  std::map<uint32_t, Cancelable*> cancelable_tasks_;
 
   // Mutex and condition variable enabling concurrent register and removing, as
   // well as waiting for background tasks on {CancelAndWait}.
@@ -104,13 +105,13 @@
   }
 
   CancelableTaskManager* parent_;
-  AtomicValue<Status> status_;
+  base::AtomicValue<Status> status_;
   uint32_t id_;
 
   // The counter is incremented for failing tries to cancel a task. This can be
   // used by the task itself as an indication how often external entities tried
   // to abort it.
-  AtomicNumber<intptr_t> cancel_counter_;
+  base::AtomicNumber<intptr_t> cancel_counter_;
 
   friend class CancelableTaskManager;
 
diff --git a/src/code-factory.cc b/src/code-factory.cc
index fbfdd5f..dd12b05 100644
--- a/src/code-factory.cc
+++ b/src/code-factory.cc
@@ -13,11 +13,15 @@
 
 // static
 Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode) {
-  return Callable(LoadIC::initialize_stub(
-                      isolate, LoadICState(typeof_mode).GetExtraICState()),
-                  LoadDescriptor(isolate));
+  LoadICTrampolineStub stub(isolate, LoadICState(typeof_mode));
+  return Callable(stub.GetCode(), LoadDescriptor(isolate));
 }
 
+// static
+Callable CodeFactory::ApiGetter(Isolate* isolate) {
+  CallApiGetterStub stub(isolate);
+  return Callable(stub.GetCode(), ApiGetterDescriptor(isolate));
+}
 
 // static
 Callable CodeFactory::LoadICInOptimizedCode(
@@ -32,8 +36,8 @@
 
 // static
 Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
-  return Callable(KeyedLoadIC::initialize_stub(isolate, kNoExtraICState),
-                  LoadDescriptor(isolate));
+  KeyedLoadICTrampolineStub stub(isolate, LoadICState(kNoExtraICState));
+  return Callable(stub.GetCode(), LoadDescriptor(isolate));
 }
 
 
@@ -53,8 +57,8 @@
 Callable CodeFactory::CallIC(Isolate* isolate, int argc,
                              ConvertReceiverMode mode,
                              TailCallMode tail_call_mode) {
-  return Callable(CallIC::initialize_stub(isolate, argc, mode, tail_call_mode),
-                  CallFunctionWithFeedbackDescriptor(isolate));
+  CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
+  return Callable(stub.GetCode(), CallFunctionWithFeedbackDescriptor(isolate));
 }
 
 
@@ -70,9 +74,8 @@
 
 // static
 Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
-  return Callable(
-      StoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
-      VectorStoreICTrampolineDescriptor(isolate));
+  VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
+  return Callable(stub.GetCode(), VectorStoreICTrampolineDescriptor(isolate));
 }
 
 
@@ -92,9 +95,8 @@
 // static
 Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
                                    LanguageMode language_mode) {
-  return Callable(
-      KeyedStoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
-      VectorStoreICTrampolineDescriptor(isolate));
+  VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
+  return Callable(stub.GetCode(), VectorStoreICTrampolineDescriptor(isolate));
 }
 
 
@@ -225,6 +227,42 @@
 }
 
 // static
+Callable CodeFactory::Multiply(Isolate* isolate) {
+  MultiplyStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::Divide(Isolate* isolate) {
+  DivideStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::Modulus(Isolate* isolate) {
+  ModulusStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::ShiftRight(Isolate* isolate) {
+  ShiftRightStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::ShiftRightLogical(Isolate* isolate) {
+  ShiftRightLogicalStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::ShiftLeft(Isolate* isolate) {
+  ShiftLeftStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
 Callable CodeFactory::BitwiseAnd(Isolate* isolate) {
   BitwiseAndStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -243,6 +281,18 @@
 }
 
 // static
+Callable CodeFactory::Inc(Isolate* isolate) {
+  IncStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
+Callable CodeFactory::Dec(Isolate* isolate) {
+  DecStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+// static
 Callable CodeFactory::LessThan(Isolate* isolate) {
   LessThanStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -365,9 +415,9 @@
 
 
 // static
-Callable CodeFactory::StoreInterceptor(Isolate* isolate) {
-  StoreInterceptorStub stub(isolate);
-  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
+  return Callable(isolate->builtins()->ResumeGeneratorTrampoline(),
+                  ResumeGeneratorDescriptor(isolate));
 }
 
 // static
@@ -423,22 +473,25 @@
 
 
 // static
-Callable CodeFactory::FastNewRestParameter(Isolate* isolate) {
-  FastNewRestParameterStub stub(isolate);
+Callable CodeFactory::FastNewRestParameter(Isolate* isolate,
+                                           bool skip_stub_frame) {
+  FastNewRestParameterStub stub(isolate, skip_stub_frame);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
 
 // static
-Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate) {
-  FastNewSloppyArgumentsStub stub(isolate);
+Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate,
+                                             bool skip_stub_frame) {
+  FastNewSloppyArgumentsStub stub(isolate, skip_stub_frame);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
 
 // static
-Callable CodeFactory::FastNewStrictArguments(Isolate* isolate) {
-  FastNewStrictArgumentsStub stub(isolate);
+Callable CodeFactory::FastNewStrictArguments(Isolate* isolate,
+                                             bool skip_stub_frame) {
+  FastNewStrictArgumentsStub stub(isolate, skip_stub_frame);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
@@ -449,13 +502,6 @@
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
-
-// static
-Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
-  AllocateMutableHeapNumberStub stub(isolate);
-  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
-}
-
 #define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type)          \
   Callable CodeFactory::Allocate##Type(Isolate* isolate) {              \
     Allocate##Type##Stub stub(isolate);                                 \
@@ -465,13 +511,6 @@
 #undef SIMD128_ALLOC
 
 // static
-Callable CodeFactory::AllocateInNewSpace(Isolate* isolate) {
-  AllocateInNewSpaceStub stub(isolate);
-  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
-}
-
-
-// static
 Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
   return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),
                   ArgumentAdaptorDescriptor(isolate));
@@ -506,6 +545,11 @@
                   ConstructTrampolineDescriptor(isolate));
 }
 
+// static
+Callable CodeFactory::HasProperty(Isolate* isolate) {
+  HasPropertyStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
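
All of the factory methods added in this file follow one recipe: construct a stub, then pair its generated code object with the matching call-interface descriptor. The shape, with a hypothetical FooStub standing in:

    // static
    Callable CodeFactory::Foo(Isolate* isolate) {  // FooStub is hypothetical
      FooStub stub(isolate);
      return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
    }
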
 
 // static
 Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
diff --git a/src/code-factory.h b/src/code-factory.h
index deb125f..7b43cae 100644
--- a/src/code-factory.h
+++ b/src/code-factory.h
@@ -54,13 +54,15 @@
       Isolate* isolate, LanguageMode mode,
       InlineCacheState initialization_state);
 
-  static Callable StoreInterceptor(Isolate* isolate);
+  static Callable ResumeGenerator(Isolate* isolate);
 
   static Callable CompareIC(Isolate* isolate, Token::Value op);
   static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
 
   static Callable BinaryOpIC(Isolate* isolate, Token::Value op);
 
+  static Callable ApiGetter(Isolate* isolate);
+
   // Code stubs. Add methods here as needed to reduce dependency on
   // code-stubs.h.
   static Callable InstanceOf(Isolate* isolate);
@@ -82,9 +84,17 @@
 
   static Callable Add(Isolate* isolate);
   static Callable Subtract(Isolate* isolate);
+  static Callable Multiply(Isolate* isolate);
+  static Callable Divide(Isolate* isolate);
+  static Callable Modulus(Isolate* isolate);
+  static Callable ShiftRight(Isolate* isolate);
+  static Callable ShiftRightLogical(Isolate* isolate);
+  static Callable ShiftLeft(Isolate* isolate);
   static Callable BitwiseAnd(Isolate* isolate);
   static Callable BitwiseOr(Isolate* isolate);
   static Callable BitwiseXor(Isolate* isolate);
+  static Callable Inc(Isolate* isolate);
+  static Callable Dec(Isolate* isolate);
   static Callable LessThan(Isolate* isolate);
   static Callable LessThanOrEqual(Isolate* isolate);
   static Callable GreaterThan(Isolate* isolate);
@@ -115,17 +125,18 @@
   static Callable FastNewClosure(Isolate* isolate, LanguageMode language_mode,
                                  FunctionKind kind);
   static Callable FastNewObject(Isolate* isolate);
-  static Callable FastNewRestParameter(Isolate* isolate);
-  static Callable FastNewSloppyArguments(Isolate* isolate);
-  static Callable FastNewStrictArguments(Isolate* isolate);
+  static Callable FastNewRestParameter(Isolate* isolate,
+                                       bool skip_stub_frame = false);
+  static Callable FastNewSloppyArguments(Isolate* isolate,
+                                         bool skip_stub_frame = false);
+  static Callable FastNewStrictArguments(Isolate* isolate,
+                                         bool skip_stub_frame = false);
 
   static Callable AllocateHeapNumber(Isolate* isolate);
-  static Callable AllocateMutableHeapNumber(Isolate* isolate);
 #define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
   static Callable Allocate##Type(Isolate* isolate);
   SIMD128_TYPES(SIMD128_ALLOC)
 #undef SIMD128_ALLOC
-  static Callable AllocateInNewSpace(Isolate* isolate);
 
   static Callable ArgumentAdaptor(Isolate* isolate);
   static Callable Call(Isolate* isolate,
@@ -135,6 +146,7 @@
       Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
   static Callable Construct(Isolate* isolate);
   static Callable ConstructFunction(Isolate* isolate);
+  static Callable HasProperty(Isolate* isolate);
 
   static Callable InterpreterPushArgsAndCall(Isolate* isolate,
                                              TailCallMode tail_call_mode);
diff --git a/src/code-stub-assembler.cc b/src/code-stub-assembler.cc
new file mode 100644
index 0000000..3e26b52
--- /dev/null
+++ b/src/code-stub-assembler.cc
@@ -0,0 +1,1572 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     const CallInterfaceDescriptor& descriptor,
+                                     Code::Flags flags, const char* name,
+                                     size_t result_size)
+    : compiler::CodeAssembler(isolate, zone, descriptor, flags, name,
+                              result_size) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     int parameter_count, Code::Flags flags,
+                                     const char* name)
+    : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
+
+Node* CodeStubAssembler::BooleanMapConstant() {
+  return HeapConstant(isolate()->factory()->boolean_map());
+}
+
+Node* CodeStubAssembler::EmptyStringConstant() {
+  return LoadRoot(Heap::kempty_stringRootIndex);
+}
+
+Node* CodeStubAssembler::HeapNumberMapConstant() {
+  return HeapConstant(isolate()->factory()->heap_number_map());
+}
+
+Node* CodeStubAssembler::NoContextConstant() {
+  return SmiConstant(Smi::FromInt(0));
+}
+
+Node* CodeStubAssembler::NullConstant() {
+  return LoadRoot(Heap::kNullValueRootIndex);
+}
+
+Node* CodeStubAssembler::UndefinedConstant() {
+  return LoadRoot(Heap::kUndefinedValueRootIndex);
+}
+
+Node* CodeStubAssembler::StaleRegisterConstant() {
+  return LoadRoot(Heap::kStaleRegisterRootIndex);
+}
+
+Node* CodeStubAssembler::Float64Round(Node* x) {
+  Node* one = Float64Constant(1.0);
+  Node* one_half = Float64Constant(0.5);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this);
+
+  // Round up {x} towards Infinity.
+  var_x.Bind(Float64Ceil(x));
+
+  GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
+         &return_x);
+  var_x.Bind(Float64Sub(var_x.value(), one));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
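
In scalar terms Float64Round computes ceil(x), then backs off by one when the ceiling overshoots x by more than 0.5, i.e. it rounds ties towards +Infinity. A minimal C++ model of that decision (a sketch; NaN and -0 handling elided):

    #include <cmath>

    double RoundHalfTowardPosInf(double x) {
      double r = std::ceil(x);
      if (r - 0.5 > x) r -= 1.0;  // overshot by more than 0.5: round down
      return r;
    }
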
+
+Node* CodeStubAssembler::Float64Ceil(Node* x) {
+  if (IsFloat64RoundUpSupported()) {
+    return Float64RoundUp(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round negated {x} towards Infinity and return the result negated.
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
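
two_52 is 2^52, the smallest double whose neighboring values are exactly 1.0 apart; adding and subtracting it therefore rounds any x in ]0,2^52[ to the nearest integer, and the compare-and-adjust that follows turns that into a ceiling. A scalar sketch, assuming round-to-nearest FP mode and no aggressive FP re-association (e.g. no -ffast-math):

    double CeilViaTwo52(double x) {  // valid only for 0 < x < 2^52
      const double kTwo52 = 4503599627370496.0;
      double r = (kTwo52 + x) - kTwo52;  // x rounded to the nearest integer
      if (r < x) r += 1.0;               // nudge up to the ceiling
      return r;
    }
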
+
+Node* CodeStubAssembler::Float64Floor(Node* x) {
+  if (IsFloat64RoundDownSupported()) {
+    return Float64RoundDown(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards -Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round negated {x} towards -Infinity and return the result negated.
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Trunc(Node* x) {
+  if (IsFloat64RoundTruncateSupported()) {
+    return Float64RoundTruncate(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than 0.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    if (IsFloat64RoundDownSupported()) {
+      var_x.Bind(Float64RoundDown(x));
+    } else {
+      // Just return {x} unless it's in the range ]0,2^52[.
+      GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+      // Round positive {x} towards -Infinity.
+      var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+    }
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    if (IsFloat64RoundUpSupported()) {
+      var_x.Bind(Float64RoundUp(x));
+      Goto(&return_x);
+    } else {
+      // Just return {x} unless it's in the range ]-2^52,0[.
+      GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+      GotoUnless(Float64LessThan(x, zero), &return_x);
+
+      // Round negated {x} towards -Infinity and return the result negated.
+      Node* minus_x = Float64Neg(x);
+      var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+      Goto(&return_minus_x);
+    }
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::SmiFromWord32(Node* value) {
+  value = ChangeInt32ToIntPtr(value);
+  return WordShl(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiTag(Node* value) {
+  int32_t constant_value;
+  if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
+    return SmiConstant(Smi::FromInt(constant_value));
+  }
+  return WordShl(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiUntag(Node* value) {
+  return WordSar(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiToWord32(Node* value) {
+  Node* result = WordSar(value, SmiShiftBitsConstant());
+  if (Is64()) {
+    result = TruncateInt64ToInt32(result);
+  }
+  return result;
+}
+
+Node* CodeStubAssembler::SmiToFloat64(Node* value) {
+  return ChangeInt32ToFloat64(SmiToWord32(value));
+}
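
A Smi is an integer stored in the pointer word itself: the payload is shifted left so the low tag bit(s) stay zero, which is why SmiTag/SmiUntag are bare shifts and why SmiToWord32 needs a truncation on 64-bit targets. A sketch of the 64-bit arithmetic (shift width assumed from V8's 64-bit Smi scheme):

    #include <cstdint>

    constexpr int kSmiShift64 = 32;  // assumed: payload lives in the upper half

    intptr_t SmiTag64(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiShift64;
    }
    int32_t SmiUntag64(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift64);  // arithmetic shift
    }
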
+
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
+
+Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
+  return IntPtrAddWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
+
+Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
+  return IntPtrSubWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+
+Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
+  return UintPtrGreaterThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
+  return IntPtrLessThan(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
+  return IntPtrLessThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
+  // TODO(bmeurer): Consider using Select once available.
+  Variable min(this, MachineRepresentation::kTagged);
+  Label if_a(this), if_b(this), join(this);
+  BranchIfSmiLessThan(a, b, &if_a, &if_b);
+  Bind(&if_a);
+  min.Bind(a);
+  Goto(&join);
+  Bind(&if_b);
+  min.Bind(b);
+  Goto(&join);
+  Bind(&join);
+  return min.value();
+}
+
+Node* CodeStubAssembler::WordIsSmi(Node* a) {
+  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
+  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+                   IntPtrConstant(0));
+}
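
WordIsSmi works because heap-object pointers carry a nonzero low tag while Smis keep it zero; masking with kSmiTagMask and comparing against zero is the whole test, and WordIsPositiveSmi additionally demands a clear sign bit. A minimal model (mask value assumed):

    #include <cstdint>

    constexpr uintptr_t kTagMask = 1;  // assumed: low bit set marks a heap pointer

    bool IsSmiWord(uintptr_t word) { return (word & kTagMask) == 0; }
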
+
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+                                              AllocationFlags flags,
+                                              Node* top_address,
+                                              Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+
+  // If there's not enough space, call the runtime.
+  Variable result(this, MachineRepresentation::kTagged);
+  Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
+  Label merge_runtime(this, &result);
+
+  Node* new_top = IntPtrAdd(top, size_in_bytes);
+  Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
+         &no_runtime_call);
+
+  Bind(&runtime_call);
+  // AllocateInTargetSpace does not use the context.
+  Node* context = SmiConstant(Smi::FromInt(0));
+
+  Node* runtime_result;
+  if (flags & kPretenured) {
+    Node* runtime_flags = SmiConstant(
+        Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+                     AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
+    runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
+                                 SmiTag(size_in_bytes), runtime_flags);
+  } else {
+    runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, context,
+                                 SmiTag(size_in_bytes));
+  }
+  result.Bind(runtime_result);
+  Goto(&merge_runtime);
+
+  // When there is enough space, return `top' and bump it up.
+  Bind(&no_runtime_call);
+  Node* no_runtime_result = top;
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      new_top);
+  no_runtime_result = BitcastWordToTagged(
+      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag)));
+  result.Bind(no_runtime_result);
+  Goto(&merge_runtime);
+
+  Bind(&merge_runtime);
+  return result.value();
+}
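
This is a textbook bump-pointer fast path: load top and limit, reserve by bumping top, and defer to the runtime only when the bump would cross the limit. The same shape in plain C++ (SlowPathAllocate is a hypothetical stand-in for the runtime call):

    #include <cstddef>
    #include <cstdint>

    uint8_t* SlowPathAllocate(size_t size);  // hypothetical runtime fallback

    uint8_t* BumpAllocate(uint8_t*& top, uint8_t* limit, size_t size) {
      uint8_t* new_top = top + size;
      if (new_top >= limit) return SlowPathAllocate(size);  // out of space
      uint8_t* result = top;
      top = new_top;  // publish the new allocation top
      return result;
    }
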
+
+Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
+                                            AllocationFlags flags,
+                                            Node* top_address,
+                                            Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+  Variable adjusted_size(this, MachineType::PointerRepresentation());
+  adjusted_size.Bind(size_in_bytes);
+  if (flags & kDoubleAlignment) {
+    // TODO(epertoso): Simd128 alignment.
+    Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
+    Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
+           &aligned);
+
+    Bind(&not_aligned);
+    Node* not_aligned_size =
+        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
+    adjusted_size.Bind(not_aligned_size);
+    Goto(&merge);
+
+    Bind(&aligned);
+    Goto(&merge);
+
+    Bind(&merge);
+  }
+
+  Variable address(this, MachineRepresentation::kTagged);
+  address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
+
+  Label needs_filler(this), doesnt_need_filler(this),
+      merge_address(this, &address);
+  Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &doesnt_need_filler,
+         &needs_filler);
+
+  Bind(&needs_filler);
+  // Store a filler and increase the address by kPointerSize.
+  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
+  // it when Simd128 alignment is supported.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
+                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+  address.Bind(BitcastWordToTagged(
+      IntPtrAdd(address.value(), IntPtrConstant(kPointerSize))));
+  Goto(&merge_address);
+
+  Bind(&doesnt_need_filler);
+  Goto(&merge_address);
+
+  Bind(&merge_address);
+  // Update the top.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      IntPtrAdd(top, adjusted_size.value()));
+  return address.value();
+}
+
+Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
+  bool const new_space = !(flags & kPretenured);
+  Node* top_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_top_address(isolate())
+          : ExternalReference::old_space_allocation_top_address(isolate()));
+  Node* limit_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_limit_address(isolate())
+          : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+#ifdef V8_HOST_ARCH_32_BIT
+  if (flags & kDoubleAlignment) {
+    return AllocateRawAligned(size_in_bytes, flags, top_address, limit_address);
+  }
+#endif
+
+  return AllocateRawUnaligned(size_in_bytes, flags, top_address, limit_address);
+}
+
+Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
+  return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
+}
+
+Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
+  return BitcastWordToTagged(IntPtrAdd(previous, offset));
+}
+
+Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
+  return InnerAllocate(previous, IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
+                                          MachineType rep) {
+  return Load(rep, buffer, IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
+                                         MachineType rep) {
+  return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
+}
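
The `offset - kHeapObjectTag` pattern used throughout this file exploits the fact that a tagged pointer is the real address plus a small tag, so the tag can be folded into the displacement of a single load rather than untagging the pointer first. In plain arithmetic (tag value assumed to be 1, as in V8):

    #include <cstdint>

    constexpr intptr_t kTag = 1;  // assumed heap-object tag

    uintptr_t FieldAddress(uintptr_t tagged_ptr, int field_offset) {
      // tagged_ptr == real_address + kTag, so subtract the tag via the offset.
      return tagged_ptr + (field_offset - kTag);
    }
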
+
+Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
+  return Load(MachineType::Float64(), object,
+              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMap(Node* object) {
+  return LoadObjectField(object, HeapObject::kMapOffset);
+}
+
+Node* CodeStubAssembler::LoadInstanceType(Node* object) {
+  return LoadMapInstanceType(LoadMap(object));
+}
+
+Node* CodeStubAssembler::LoadElements(Node* object) {
+  return LoadObjectField(object, JSObject::kElementsOffset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
+  return LoadObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
+Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+  return Load(MachineType::Uint32(), map,
+              IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+  return LoadObjectField(map, Map::kDescriptorsOffset);
+}
+
+Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
+  return LoadObjectField(map, Map::kPrototypeOffset);
+}
+
+Node* CodeStubAssembler::LoadNameHash(Node* name) {
+  return Load(MachineType::Uint32(), name,
+              IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
+  Node* header_size = IntPtrConstant(FixedArray::kHeaderSize);
+  Node* data_size = WordShl(length, IntPtrConstant(kPointerSizeLog2));
+  Node* total_size = IntPtrAdd(data_size, header_size);
+
+  Node* result = Allocate(total_size, kNone);
+  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kFixedArrayMapRootIndex));
+  StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+      SmiTag(length));
+
+  return result;
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
+                                               int additional_offset,
+                                               ParameterMode parameter_mode) {
+  int32_t header_size =
+      FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
+  Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+                                        parameter_mode, header_size);
+  return Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kInstanceSizeOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadNativeContext(Node* context) {
+  return LoadFixedArrayElement(context,
+                               Int32Constant(Context::NATIVE_CONTEXT_INDEX));
+}
+
+Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
+                                                Node* native_context) {
+  return LoadFixedArrayElement(native_context,
+                               Int32Constant(Context::ArrayMapIndex(kind)));
+}
+
+Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kFloat64, object,
+      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreObjectField(
+    Node* object, int offset, Node* value) {
+  return Store(MachineRepresentation::kTagged, object,
+               IntPtrConstant(offset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
+    Node* object, int offset, Node* value, MachineRepresentation rep) {
+  return StoreNoWriteBarrier(rep, object,
+                             IntPtrConstant(offset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kTagged, object,
+      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
+                                                Node* value,
+                                                WriteBarrierMode barrier_mode,
+                                                ParameterMode parameter_mode) {
+  DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
+         barrier_mode == UPDATE_WRITE_BARRIER);
+  Node* offset =
+      ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, parameter_mode,
+                             FixedArray::kHeaderSize - kHeapObjectTag);
+  MachineRepresentation rep = MachineRepresentation::kTagged;
+  if (barrier_mode == SKIP_WRITE_BARRIER) {
+    return StoreNoWriteBarrier(rep, object, offset, value);
+  } else {
+    return Store(rep, object, offset, value);
+  }
+}
+
+Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
+    Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
+  Node* offset =
+      ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
+                             FixedArray::kHeaderSize - kHeapObjectTag);
+  MachineRepresentation rep = MachineRepresentation::kFloat64;
+  return StoreNoWriteBarrier(rep, object, offset, value);
+}
+
+Node* CodeStubAssembler::AllocateHeapNumber() {
+  Node* result = Allocate(HeapNumber::kSize, kNone);
+  StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
+  Node* result = AllocateHeapNumber();
+  StoreHeapNumberValue(result, value);
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
+  Node* result = Allocate(SeqOneByteString::SizeFor(length));
+  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
+  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
+                                 SmiConstant(Smi::FromInt(length)));
+  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
+                                 IntPtrConstant(String::kEmptyHashField));
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
+  Node* result = Allocate(SeqTwoByteString::SizeFor(length));
+  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
+                                 SmiConstant(Smi::FromInt(length)));
+  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
+                                 IntPtrConstant(String::kEmptyHashField));
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
+                                         Node* capacity_node, Node* length_node,
+                                         compiler::Node* allocation_site,
+                                         ParameterMode mode) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+  int base_size = JSArray::kSize + FixedArray::kHeaderSize;
+  int elements_offset = JSArray::kSize;
+
+  if (allocation_site != nullptr) {
+    base_size += AllocationMemento::kSize;
+    elements_offset += AllocationMemento::kSize;
+  }
+
+  int32_t capacity;
+  bool constant_capacity = ToInt32Constant(capacity_node, capacity);
+  Node* total_size =
+      ElementOffsetFromIndex(capacity_node, kind, mode, base_size);
+
+  // Allocate both array and elements object, and initialize the JSArray.
+  Heap* heap = isolate()->heap();
+  Node* array = Allocate(total_size);
+  StoreMapNoWriteBarrier(array, array_map);
+  Node* empty_properties =
+      HeapConstant(Handle<HeapObject>(heap->empty_fixed_array()));
+  StoreObjectFieldNoWriteBarrier(array, JSArray::kPropertiesOffset,
+                                 empty_properties);
+  StoreObjectFieldNoWriteBarrier(
+      array, JSArray::kLengthOffset,
+      mode == SMI_PARAMETERS ? length_node : SmiTag(length_node));
+
+  if (allocation_site != nullptr) {
+    InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
+  }
+
+  // Set up the elements object.
+  Node* elements = InnerAllocate(array, elements_offset);
+  StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
+  Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
+                                     : heap->fixed_array_map());
+  StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
+  StoreObjectFieldNoWriteBarrier(
+      elements, FixedArray::kLengthOffset,
+      mode == SMI_PARAMETERS ? capacity_node : SmiTag(capacity_node));
+
+  int const first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+  Node* hole = HeapConstant(Handle<HeapObject>(heap->the_hole_value()));
+  Node* double_hole =
+      Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
+  DCHECK_EQ(kHoleNanLower32, kHoleNanUpper32);
+  if (constant_capacity && capacity <= kElementLoopUnrollThreshold) {
+    for (int i = 0; i < capacity; ++i) {
+      if (is_double) {
+        Node* offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+                                              first_element_offset);
+        // Don't use doubles to store the hole double, since manipulating the
+        // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+        // change its value on ia32 (the x87 stack is used to return values
+        // and stores to the stack silently clear the signaling bit).
+        //
+        // TODO(danno): When we have a Float32/Float64 wrapper class that
+        // preserves double bits during manipulation, remove this code/change
+        // this to an indexed Float64 store.
+        if (Is64()) {
+          StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+                              double_hole);
+        } else {
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+                              double_hole);
+          offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+                                          first_element_offset + kPointerSize);
+          StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+                              double_hole);
+        }
+      } else {
+        StoreFixedArrayElement(elements, Int32Constant(i), hole,
+                               SKIP_WRITE_BARRIER);
+      }
+    }
+  } else {
+    // TODO(danno): Add a loop for initialization
+    UNIMPLEMENTED();
+  }
+
+  return array;
+}
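
The comment above is the key subtlety: the hole is a signaling NaN, and on ia32 any trip through the x87 stack would quietly set the quiet bit and corrupt it. Storing the pattern as integer words keeps it away from the FPU entirely. A sketch of the split 32-bit store (the exact bit pattern is an assumption; the code only relies on the lower and upper words being equal):

    #include <cstdint>
    #include <cstring>

    void StoreHoleNaN(uint8_t* slot) {
      const uint32_t kHoleWord = 0xFFF7FFFF;  // assumed hole-NaN word
      std::memcpy(slot, &kHoleWord, 4);       // low half, as an integer store
      std::memcpy(slot + 4, &kHoleWord, 4);   // high half; FPU never touches it
    }
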
+
+void CodeStubAssembler::InitializeAllocationMemento(
+    compiler::Node* base_allocation, int base_allocation_size,
+    compiler::Node* allocation_site) {
+  StoreObjectFieldNoWriteBarrier(
+      base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
+      HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+  StoreObjectFieldNoWriteBarrier(
+      base_allocation,
+      AllocationMemento::kAllocationSiteOffset + base_allocation_size,
+      allocation_site);
+  if (FLAG_allocation_site_pretenuring) {
+    Node* count = LoadObjectField(allocation_site,
+                                  AllocationSite::kPretenureCreateCountOffset);
+    Node* incremented_count = IntPtrAdd(count, SmiConstant(Smi::FromInt(1)));
+    StoreObjectFieldNoWriteBarrier(allocation_site,
+                                   AllocationSite::kPretenureCreateCountOffset,
+                                   incremented_count);
+  }
+}
+
+Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kFloat64);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToFloat64(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Load the floating point value.
+        var_result.Bind(LoadHeapNumberValue(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
+
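The conversion loop above needs at most one extra iteration, because NonNumberToNumber always yields a Smi or a HeapNumber. A host-side sketch of the same control flow, with a hypothetical tagged-value type (an assumption for illustration, not V8's API):

#include <cstdint>

// Hypothetical stand-in for a tagged value; illustration only.
struct Value {
  bool is_smi;
  bool is_heap_number;
  int32_t smi;
  double number;
};

Value NonNumberToNumber(const Value& v);  // guaranteed: Smi or HeapNumber

double TruncateTaggedToFloat64Sketch(Value v) {
  for (;;) {
    if (v.is_smi) return static_cast<double>(v.smi);
    if (v.is_heap_number) return v.number;
    // At most one ToNumber round-trip: the next iteration must hit a case
    // above, mirroring the var_value/Goto(&loop) pattern in the stub.
    v = NonNumberToNumber(v);
  }
}
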
+Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kWord32);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToWord32(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Truncate the floating point value.
+        var_result.Bind(TruncateHeapNumberValueToWord32(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+  Node* value = LoadHeapNumberValue(object);
+  return TruncateFloat64ToWord32(value);
+}
+
+Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
+  Node* value32 = RoundFloat64ToInt32(value);
+  Node* value64 = ChangeInt32ToFloat64(value32);
+
+  Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
+
+  Label if_valueisequal(this), if_valueisnotequal(this);
+  Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
+  Bind(&if_valueisequal);
+  {
+    GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
+    BranchIfInt32LessThan(Float64ExtractHighWord32(value), Int32Constant(0),
+                          &if_valueisheapnumber, &if_valueisint32);
+  }
+  Bind(&if_valueisnotequal);
+  Goto(&if_valueisheapnumber);
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Bind(&if_valueisint32);
+  {
+    if (Is64()) {
+      Node* result = SmiTag(ChangeInt32ToInt64(value32));
+      var_result.Bind(result);
+      Goto(&if_join);
+    } else {
+      Node* pair = Int32AddWithOverflow(value32, value32);
+      Node* overflow = Projection(1, pair);
+      Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+      Branch(overflow, &if_overflow, &if_notoverflow);
+      Bind(&if_overflow);
+      Goto(&if_valueisheapnumber);
+      Bind(&if_notoverflow);
+      {
+        Node* result = Projection(0, pair);
+        var_result.Bind(result);
+        Goto(&if_join);
+      }
+    }
+  }
+  Bind(&if_valueisheapnumber);
+  {
+    Node* result = AllocateHeapNumberWithValue(value);
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+  Bind(&if_join);
+  return var_result.value();
+}
+
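The subtle case above is minus zero: it round-trips through RoundFloat64ToInt32 as +0, so when value32 is zero the stub inspects the sign bit in the high word and boxes a HeapNumber instead of producing Smi 0. A standalone sketch of the predicate (illustrative only, not part of the patch):

#include <cstdint>
#include <cstring>

// True when the double can be a Smi on a 64-bit target; 32-bit targets
// additionally need the add-with-overflow tag check sketched below.
bool CanTagAsSmi64(double value) {
  // Guard the cast: the stub's RoundFloat64ToInt32 has defined semantics,
  // while a plain C++ cast of NaN/out-of-range values does not.
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
  int32_t value32 = static_cast<int32_t>(value);
  if (static_cast<double>(value32) != value) return false;  // not integral
  if (value32 == 0) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    if (bits >> 63) return false;  // -0.0: sign bit lives in the high word
  }
  return true;
}
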
+Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
+  if (Is64()) {
+    return SmiTag(ChangeInt32ToInt64(value));
+  }
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Node* pair = Int32AddWithOverflow(value, value);
+  Node* overflow = Projection(1, pair);
+  Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
+      if_join(this);
+  Branch(overflow, &if_overflow, &if_notoverflow);
+  Bind(&if_overflow);
+  {
+    Node* value64 = ChangeInt32ToFloat64(value);
+    Node* result = AllocateHeapNumberWithValue(value64);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_notoverflow);
+  {
+    Node* result = Projection(0, pair);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_join);
+  return var_result.value();
+}
+
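On 32-bit targets a Smi is the value shifted left by one (tag bit 0), so Int32AddWithOverflow(value, value) both computes the tagged word and, via the overflow projection, detects values outside the 31-bit Smi range. A standalone sketch (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Mirrors the add-with-overflow trick: on success *tagged holds value << 1,
// i.e. the 32-bit Smi encoding.
bool TrySmiTag32(int32_t value, int32_t* tagged) {
  int64_t doubled = 2 * static_cast<int64_t>(value);
  if (doubled < INT32_MIN || doubled > INT32_MAX) return false;  // overflow
  *tagged = static_cast<int32_t>(doubled);
  return true;
}

int main() {
  int32_t tagged;
  std::printf("1073741823 -> %s\n",  // 2^30 - 1, largest 31-bit Smi
              TrySmiTag32(1073741823, &tagged) ? "Smi" : "HeapNumber");
  std::printf("1073741824 -> %s\n",  // 2^30, overflows the Smi range
              TrySmiTag32(1073741824, &tagged) ? "Smi" : "HeapNumber");
  return 0;
}
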
+Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
+  Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
+      if_join(this);
+  Variable var_result(this, MachineRepresentation::kTagged);
+  // If {value} > 2^31 - 1, we need to store it in a HeapNumber.
+  Branch(Int32LessThan(value, Int32Constant(0)), &if_overflow,
+         &if_not_overflow);
+  Bind(&if_not_overflow);
+  {
+    if (Is64()) {
+      var_result.Bind(SmiTag(ChangeUint32ToUint64(value)));
+    } else {
+      // If tagging {value} results in an overflow, we need to use a HeapNumber
+      // to represent it.
+      Node* pair = Int32AddWithOverflow(value, value);
+      Node* overflow = Projection(1, pair);
+      GotoIf(overflow, &if_overflow);
+
+      Node* result = Projection(0, pair);
+      var_result.Bind(result);
+    }
+  }
+  Goto(&if_join);
+
+  Bind(&if_overflow);
+  {
+    Node* float64_value = ChangeUint32ToFloat64(value);
+    var_result.Bind(AllocateHeapNumberWithValue(float64_value));
+  }
+  Goto(&if_join);
+
+  Bind(&if_join);
+  return var_result.value();
+}
+
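The Int32LessThan(value, 0) test above works because reinterpreting the unsigned word as signed makes exactly the values above 2^31 - 1 appear negative. Sketch (illustrative only, not part of the patch):

#include <cstdint>

// True when the uint32 exceeds 2^31 - 1 and must be boxed in a HeapNumber;
// the signed view of the same bits is negative in exactly that case
// (two's complement assumption, which V8's targets share).
bool NeedsHeapNumber(uint32_t value) {
  return static_cast<int32_t>(value) < 0;
}
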
+Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
+                                      char const* method_name) {
+  Variable var_value(this, MachineRepresentation::kTagged);
+  var_value.Bind(value);
+
+  // Check if the {value} is a Smi or a HeapObject.
+  Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
+      if_valueisstring(this);
+  Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+  Bind(&if_valueisnotsmi);
+  {
+    // Load the instance type of the {value}.
+    Node* value_instance_type = LoadInstanceType(value);
+
+    // Check if the {value} is already String.
+    Label if_valueisnotstring(this, Label::kDeferred);
+    Branch(
+        Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
+        &if_valueisstring, &if_valueisnotstring);
+    Bind(&if_valueisnotstring);
+    {
+      // Check if the {value} is null.
+      Label if_valueisnullorundefined(this, Label::kDeferred),
+          if_valueisnotnullorundefined(this, Label::kDeferred),
+          if_valueisnotnull(this, Label::kDeferred);
+      Branch(WordEqual(value, NullConstant()), &if_valueisnullorundefined,
+             &if_valueisnotnull);
+      Bind(&if_valueisnotnull);
+      {
+        // Check if the {value} is undefined.
+        Branch(WordEqual(value, UndefinedConstant()),
+               &if_valueisnullorundefined, &if_valueisnotnullorundefined);
+        Bind(&if_valueisnotnullorundefined);
+        {
+          // Convert the {value} to a String.
+          Callable callable = CodeFactory::ToString(isolate());
+          var_value.Bind(CallStub(callable, context, value));
+          Goto(&if_valueisstring);
+        }
+      }
+
+      Bind(&if_valueisnullorundefined);
+      {
+        // The {value} is either null or undefined.
+        CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
+                    HeapConstant(factory()->NewStringFromAsciiChecked(
+                        method_name, TENURED)));
+        Goto(&if_valueisstring);  // Never reached.
+      }
+    }
+  }
+  Bind(&if_valueissmi);
+  {
+    // The {value} is a Smi, convert it to a String.
+    Callable callable = CodeFactory::NumberToString(isolate());
+    var_value.Bind(CallStub(callable, context, value));
+    Goto(&if_valueisstring);
+  }
+  Bind(&if_valueisstring);
+  return var_value.value();
+}
+
+Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+  // Translate the {index} into a Word.
+  index = SmiToWord(index);
+
+  // We may need to loop in case of cons or sliced strings.
+  Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_result(this, MachineRepresentation::kWord32);
+  Variable var_string(this, MachineRepresentation::kTagged);
+  Variable* loop_vars[] = {&var_index, &var_string};
+  Label done_loop(this, &var_result), loop(this, 2, loop_vars);
+  var_string.Bind(string);
+  var_index.Bind(index);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {index}.
+    index = var_index.value();
+
+    // Load the current {string}.
+    string = var_string.value();
+
+    // Load the instance type of the {string}.
+    Node* string_instance_type = LoadInstanceType(string);
+
+    // Check if the {string} is a SeqString.
+    Label if_stringissequential(this), if_stringisnotsequential(this);
+    Branch(Word32Equal(Word32And(string_instance_type,
+                                 Int32Constant(kStringRepresentationMask)),
+                       Int32Constant(kSeqStringTag)),
+           &if_stringissequential, &if_stringisnotsequential);
+
+    Bind(&if_stringissequential);
+    {
+      // Check if the {string} is a TwoByteSeqString or a OneByteSeqString.
+      Label if_stringistwobyte(this), if_stringisonebyte(this);
+      Branch(Word32Equal(Word32And(string_instance_type,
+                                   Int32Constant(kStringEncodingMask)),
+                         Int32Constant(kTwoByteStringTag)),
+             &if_stringistwobyte, &if_stringisonebyte);
+
+      Bind(&if_stringisonebyte);
+      {
+        var_result.Bind(
+            Load(MachineType::Uint8(), string,
+                 IntPtrAdd(index, IntPtrConstant(SeqOneByteString::kHeaderSize -
+                                                 kHeapObjectTag))));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_stringistwobyte);
+      {
+        var_result.Bind(
+            Load(MachineType::Uint16(), string,
+                 IntPtrAdd(WordShl(index, IntPtrConstant(1)),
+                           IntPtrConstant(SeqTwoByteString::kHeaderSize -
+                                          kHeapObjectTag))));
+        Goto(&done_loop);
+      }
+    }
+
+    Bind(&if_stringisnotsequential);
+    {
+      // Check if the {string} is a ConsString.
+      Label if_stringiscons(this), if_stringisnotcons(this);
+      Branch(Word32Equal(Word32And(string_instance_type,
+                                   Int32Constant(kStringRepresentationMask)),
+                         Int32Constant(kConsStringTag)),
+             &if_stringiscons, &if_stringisnotcons);
+
+      Bind(&if_stringiscons);
+      {
+        // Check whether the right hand side is the empty string (i.e. if
+        // this is really a flat string in a cons string). If that is not
+        // the case we flatten the string first.
+        Label if_rhsisempty(this), if_rhsisnotempty(this, Label::kDeferred);
+        Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+        Branch(WordEqual(rhs, EmptyStringConstant()), &if_rhsisempty,
+               &if_rhsisnotempty);
+
+        Bind(&if_rhsisempty);
+        {
+          // Just operate on the left hand side of the {string}.
+          var_string.Bind(LoadObjectField(string, ConsString::kFirstOffset));
+          Goto(&loop);
+        }
+
+        Bind(&if_rhsisnotempty);
+        {
+          // Flatten the {string} and look up the character in the result.
+          var_string.Bind(CallRuntime(Runtime::kFlattenString,
+                                      NoContextConstant(), string));
+          Goto(&loop);
+        }
+      }
+
+      Bind(&if_stringisnotcons);
+      {
+        // Check if the {string} is an ExternalString.
+        Label if_stringisexternal(this), if_stringisnotexternal(this);
+        Branch(Word32Equal(Word32And(string_instance_type,
+                                     Int32Constant(kStringRepresentationMask)),
+                           Int32Constant(kExternalStringTag)),
+               &if_stringisexternal, &if_stringisnotexternal);
+
+        Bind(&if_stringisexternal);
+        {
+          // Check if the {string} is a short external string.
+          Label if_stringisshort(this),
+              if_stringisnotshort(this, Label::kDeferred);
+          Branch(Word32Equal(Word32And(string_instance_type,
+                                       Int32Constant(kShortExternalStringMask)),
+                             Int32Constant(0)),
+                 &if_stringisshort, &if_stringisnotshort);
+
+          Bind(&if_stringisshort);
+          {
+            // Load the actual resource data from the {string}.
+            Node* string_resource_data =
+                LoadObjectField(string, ExternalString::kResourceDataOffset,
+                                MachineType::Pointer());
+
+            // Check if the {string} is a TwoByteExternalString or a
+            // OneByteExternalString.
+            Label if_stringistwobyte(this), if_stringisonebyte(this);
+            Branch(Word32Equal(Word32And(string_instance_type,
+                                         Int32Constant(kStringEncodingMask)),
+                               Int32Constant(kTwoByteStringTag)),
+                   &if_stringistwobyte, &if_stringisonebyte);
+
+            Bind(&if_stringisonebyte);
+            {
+              var_result.Bind(
+                  Load(MachineType::Uint8(), string_resource_data, index));
+              Goto(&done_loop);
+            }
+
+            Bind(&if_stringistwobyte);
+            {
+              var_result.Bind(Load(MachineType::Uint16(), string_resource_data,
+                                   WordShl(index, IntPtrConstant(1))));
+              Goto(&done_loop);
+            }
+          }
+
+          Bind(&if_stringisnotshort);
+          {
+            // The {string} might be compressed; call the runtime.
+            var_result.Bind(SmiToWord32(
+                CallRuntime(Runtime::kExternalStringGetChar,
+                            NoContextConstant(), string, SmiTag(index))));
+            Goto(&done_loop);
+          }
+        }
+
+        Bind(&if_stringisnotexternal);
+        {
+          // The {string} is a SlicedString, continue with its parent.
+          Node* string_offset =
+              SmiToWord(LoadObjectField(string, SlicedString::kOffsetOffset));
+          Node* string_parent =
+              LoadObjectField(string, SlicedString::kParentOffset);
+          var_index.Bind(IntPtrAdd(index, string_offset));
+          var_string.Bind(string_parent);
+          Goto(&loop);
+        }
+      }
+    }
+  }
+
+  Bind(&done_loop);
+  return var_result.value();
+}
+
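The loop above only restarts for indirect representations: a cons string with a non-empty right side is flattened via the runtime, and a sliced string adjusts the index and continues with its parent, so each level of indirection costs one iteration. A host-side sketch of the dispatch, with a hypothetical String interface (illustration only, not V8's layout):

#include <cstddef>
#include <cstdint>

enum Representation { kSeq, kExternal, kCons, kSliced };

struct String {
  Representation representation() const;
  uint16_t CharAt(size_t index) const;  // seq/external direct access
  const String* first() const;          // cons: left side
  const String* parent() const;         // sliced: backing string
  size_t offset() const;                // sliced: start offset in parent
};

const String* Flatten(const String* s);  // runtime call for non-flat cons

uint16_t CharCodeAtSketch(const String* s, size_t index) {
  for (;;) {
    switch (s->representation()) {
      case kSeq:
      case kExternal:
        return s->CharAt(index);
      case kCons:
        s = Flatten(s);  // or s->first() when the right side is empty
        break;
      case kSliced:
        index += s->offset();
        s = s->parent();
        break;
    }
  }
}
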
+Node* CodeStubAssembler::StringFromCharCode(Node* code) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+
+  // Check if the {code} is a one-byte char code.
+  Label if_codeisonebyte(this), if_codeistwobyte(this, Label::kDeferred),
+      if_done(this);
+  Branch(Int32LessThanOrEqual(code, Int32Constant(String::kMaxOneByteCharCode)),
+         &if_codeisonebyte, &if_codeistwobyte);
+  Bind(&if_codeisonebyte);
+  {
+    // Load the isolate wide single character string cache.
+    Node* cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);
+
+    // Check if we have an entry for the {code} in the single character string
+    // cache already.
+    Label if_entryisundefined(this, Label::kDeferred),
+        if_entryisnotundefined(this);
+    Node* entry = LoadFixedArrayElement(cache, code);
+    Branch(WordEqual(entry, UndefinedConstant()), &if_entryisundefined,
+           &if_entryisnotundefined);
+
+    Bind(&if_entryisundefined);
+    {
+      // Allocate a new SeqOneByteString for {code} and store it in the {cache}.
+      Node* result = AllocateSeqOneByteString(1);
+      StoreNoWriteBarrier(
+          MachineRepresentation::kWord8, result,
+          IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag), code);
+      StoreFixedArrayElement(cache, code, result);
+      var_result.Bind(result);
+      Goto(&if_done);
+    }
+
+    Bind(&if_entryisnotundefined);
+    {
+      // Return the entry from the {cache}.
+      var_result.Bind(entry);
+      Goto(&if_done);
+    }
+  }
+
+  Bind(&if_codeistwobyte);
+  {
+    // Allocate a new SeqTwoByteString for {code}.
+    Node* result = AllocateSeqTwoByteString(1);
+    StoreNoWriteBarrier(
+        MachineRepresentation::kWord16, result,
+        IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), code);
+    var_result.Bind(result);
+    Goto(&if_done);
+  }
+
+  Bind(&if_done);
+  return var_result.value();
+}
+
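Codes up to String::kMaxOneByteCharCode share per-isolate singleton strings, so only the first request for a given code allocates. A sketch of the cache discipline with standard-library stand-ins (illustration only, not part of the patch):

#include <array>
#include <memory>
#include <string>

// Assumes 0 <= code <= 0xFF, mirroring the one-byte fast path above.
std::string* SingleCharString(int code) {
  static std::array<std::unique_ptr<std::string>, 256> cache;
  if (!cache[code]) {  // cache miss: allocate once and remember it
    cache[code] = std::make_unique<std::string>(1, static_cast<char>(code));
  }
  return cache[code].get();  // same pointer for every request of this code
}
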
+Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
+                                        uint32_t mask) {
+  return Word32Shr(Word32And(word32, Int32Constant(mask)),
+                   Int32Constant(shift));
+}
+
+void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
+                                  Variable* var_index, Label* if_keyisunique,
+                                  Label* call_runtime) {
+  DCHECK_EQ(MachineRepresentation::kWord32, var_index->rep());
+
+  Label if_keyissmi(this), if_keyisnotsmi(this);
+  Branch(WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
+  Bind(&if_keyissmi);
+  {
+    // Negative smi keys are named properties. Handle in the runtime.
+    Label if_keyispositive(this);
+    Branch(WordIsPositiveSmi(key), &if_keyispositive, call_runtime);
+    Bind(&if_keyispositive);
+
+    var_index->Bind(SmiToWord32(key));
+    Goto(if_keyisindex);
+  }
+
+  Bind(&if_keyisnotsmi);
+
+  Node* key_instance_type = LoadInstanceType(key);
+  Label if_keyisnotsymbol(this);
+  Branch(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
+         if_keyisunique, &if_keyisnotsymbol);
+  Bind(&if_keyisnotsymbol);
+  {
+    Label if_keyisinternalized(this);
+    Node* bits =
+        WordAnd(key_instance_type,
+                Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
+    Branch(Word32Equal(bits, Int32Constant(kStringTag | kInternalizedTag)),
+           &if_keyisinternalized, call_runtime);
+    Bind(&if_keyisinternalized);
+
+    // Check whether the key is an array index passed in as a string. If so,
+    // handle it uniformly with smi keys.
+    // TODO(verwaest): Also support non-internalized strings.
+    Node* hash = LoadNameHash(key);
+    Node* bit =
+        Word32And(hash, Int32Constant(internal::Name::kIsNotArrayIndexMask));
+    Label if_isarrayindex(this);
+    Branch(Word32Equal(bit, Int32Constant(0)), &if_isarrayindex,
+           if_keyisunique);
+    Bind(&if_isarrayindex);
+    var_index->Bind(BitFieldDecode<internal::Name::ArrayIndexValueBits>(hash));
+    Goto(if_keyisindex);
+  }
+}
+
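An internalized string that spells a valid array index caches that index directly in its hash field, which is why one bit test plus a BitFieldDecode suffices above. A sketch of the check, where the field-layout constants are assumptions for illustration rather than V8's actual values:

#include <cstdint>

constexpr uint32_t kIsNotArrayIndexMaskSketch = 1u << 1;  // assumed layout
constexpr uint32_t kArrayIndexShiftSketch = 2;            // assumed layout
constexpr uint32_t kArrayIndexBitsSketch = 24;            // assumed layout

bool TryIndexFromHashField(uint32_t hash_field, uint32_t* index) {
  if (hash_field & kIsNotArrayIndexMaskSketch) return false;  // unique name
  *index = (hash_field >> kArrayIndexShiftSketch) &
           ((1u << kArrayIndexBitsSketch) - 1);
  return true;
}
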
+void CodeStubAssembler::TryLookupProperty(Node* object, Node* map,
+                                          Node* instance_type, Node* name,
+                                          Label* if_found, Label* if_not_found,
+                                          Label* call_runtime) {
+  {
+    Label if_objectissimple(this);
+    Branch(Int32LessThanOrEqual(instance_type,
+                                Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+           call_runtime, &if_objectissimple);
+    Bind(&if_objectissimple);
+  }
+
+  // TODO(verwaest): Perform a dictionary lookup on slow-mode receivers.
+  Node* bit_field3 = LoadMapBitField3(map);
+  Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
+  Label if_isfastmap(this);
+  Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, call_runtime);
+  Bind(&if_isfastmap);
+  Node* nof = BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+  // Bail out to the runtime for large numbers of own descriptors. The stub only
+  // does linear search, which becomes too expensive in that case.
+  {
+    static const int32_t kMaxLinear = 210;
+    Label above_max(this), below_max(this);
+    Branch(Int32LessThanOrEqual(nof, Int32Constant(kMaxLinear)), &below_max,
+           call_runtime);
+    Bind(&below_max);
+  }
+  Node* descriptors = LoadMapDescriptors(map);
+
+  Variable var_descriptor(this, MachineRepresentation::kWord32);
+  Label loop(this, &var_descriptor);
+  var_descriptor.Bind(Int32Constant(0));
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* index = var_descriptor.value();
+    Node* offset = Int32Constant(DescriptorArray::ToKeyIndex(0));
+    Node* factor = Int32Constant(DescriptorArray::kDescriptorSize);
+    Label if_notdone(this);
+    Branch(Word32Equal(index, nof), if_not_found, &if_notdone);
+    Bind(&if_notdone);
+    {
+      Node* array_index = Int32Add(offset, Int32Mul(index, factor));
+      Node* current = LoadFixedArrayElement(descriptors, array_index);
+      Label if_unequal(this);
+      Branch(WordEqual(current, name), if_found, &if_unequal);
+      Bind(&if_unequal);
+
+      var_descriptor.Bind(Int32Add(index, Int32Constant(1)));
+      Goto(&loop);
+    }
+  }
+}
+
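Descriptor keys sit at ToKeyIndex(0) + i * kDescriptorSize in the descriptor array, and identity comparison suffices because names on this path are unique (symbols or internalized strings); this linear scan is also why the kMaxLinear bail-out above exists. A flat-array sketch (illustration only, not part of the patch):

// Hypothetical flat view of a descriptor array: keys interleaved with the
// other per-descriptor slots at a fixed stride.
int FindOwnDescriptor(void* const* descriptors, int first_key_index,
                      int descriptor_size, int nof, const void* name) {
  for (int i = 0; i < nof; ++i) {
    if (descriptors[first_key_index + i * descriptor_size] == name) return i;
  }
  return -1;  // corresponds to the if_not_found label
}
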
+void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
+                                         Node* instance_type, Node* index,
+                                         Label* if_found, Label* if_not_found,
+                                         Label* call_runtime) {
+  {
+    Label if_objectissimple(this);
+    Branch(Int32LessThanOrEqual(instance_type,
+                                Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+           call_runtime, &if_objectissimple);
+    Bind(&if_objectissimple);
+  }
+
+  Node* bit_field2 = LoadMapBitField2(map);
+  Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+
+  // TODO(verwaest): Support other elements kinds as well.
+  Label if_isobjectorsmi(this);
+  Branch(
+      Int32LessThanOrEqual(elements_kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
+      &if_isobjectorsmi, call_runtime);
+  Bind(&if_isobjectorsmi);
+  {
+    Node* elements = LoadElements(object);
+    Node* length = LoadFixedArrayBaseLength(elements);
+
+    Label if_iskeyinrange(this);
+    Branch(Int32LessThan(index, SmiToWord32(length)), &if_iskeyinrange,
+           if_not_found);
+
+    Bind(&if_iskeyinrange);
+    Node* element = LoadFixedArrayElement(elements, index);
+    Node* the_hole = LoadRoot(Heap::kTheHoleValueRootIndex);
+    Branch(WordEqual(element, the_hole), if_not_found, if_found);
+  }
+}
+
+Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
+                                             Node* object) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label return_false(this), return_true(this),
+      return_runtime(this, Label::kDeferred), return_result(this);
+
+  // Goto runtime if {object} is a Smi.
+  GotoIf(WordIsSmi(object), &return_runtime);
+
+  // Load map of {object}.
+  Node* object_map = LoadMap(object);
+
+  // Lookup the {callable} and {object} map in the global instanceof cache.
+  // Note: This is safe because we clear the global instanceof cache whenever
+  // we change the prototype of any object.
+  Node* instanceof_cache_function =
+      LoadRoot(Heap::kInstanceofCacheFunctionRootIndex);
+  Node* instanceof_cache_map = LoadRoot(Heap::kInstanceofCacheMapRootIndex);
+  {
+    Label instanceof_cache_miss(this);
+    GotoUnless(WordEqual(instanceof_cache_function, callable),
+               &instanceof_cache_miss);
+    GotoUnless(WordEqual(instanceof_cache_map, object_map),
+               &instanceof_cache_miss);
+    var_result.Bind(LoadRoot(Heap::kInstanceofCacheAnswerRootIndex));
+    Goto(&return_result);
+    Bind(&instanceof_cache_miss);
+  }
+
+  // Goto runtime if {callable} is a Smi.
+  GotoIf(WordIsSmi(callable), &return_runtime);
+
+  // Load map of {callable}.
+  Node* callable_map = LoadMap(callable);
+
+  // Goto runtime if {callable} is not a JSFunction.
+  Node* callable_instance_type = LoadMapInstanceType(callable_map);
+  GotoUnless(
+      Word32Equal(callable_instance_type, Int32Constant(JS_FUNCTION_TYPE)),
+      &return_runtime);
+
+  // Goto runtime if {callable} is not a constructor or has
+  // a non-instance "prototype".
+  Node* callable_bitfield = LoadMapBitField(callable_map);
+  GotoUnless(
+      Word32Equal(Word32And(callable_bitfield,
+                            Int32Constant((1 << Map::kHasNonInstancePrototype) |
+                                          (1 << Map::kIsConstructor))),
+                  Int32Constant(1 << Map::kIsConstructor)),
+      &return_runtime);
+
+  // Get the "prototype" (or initial map) of the {callable}.
+  Node* callable_prototype =
+      LoadObjectField(callable, JSFunction::kPrototypeOrInitialMapOffset);
+  {
+    Variable var_callable_prototype(this, MachineRepresentation::kTagged);
+    Label callable_prototype_valid(this);
+    var_callable_prototype.Bind(callable_prototype);
+
+    // Resolve the "prototype" if the {callable} has an initial map.  Afterwards
+    // the {callable_prototype} will be either the JSReceiver prototype object
+    // or the hole value, which means that no instances of the {callable} were
+    // created so far and hence we should return false.
+    Node* callable_prototype_instance_type =
+        LoadInstanceType(callable_prototype);
+    GotoUnless(
+        Word32Equal(callable_prototype_instance_type, Int32Constant(MAP_TYPE)),
+        &callable_prototype_valid);
+    var_callable_prototype.Bind(
+        LoadObjectField(callable_prototype, Map::kPrototypeOffset));
+    Goto(&callable_prototype_valid);
+    Bind(&callable_prototype_valid);
+    callable_prototype = var_callable_prototype.value();
+  }
+
+  // Update the global instanceof cache with the current {object} map and
+  // {callable}.  The cached answer will be set when it is known below.
+  StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, callable);
+  StoreRoot(Heap::kInstanceofCacheMapRootIndex, object_map);
+
+  // Loop through the prototype chain looking for the {callable} prototype.
+  Variable var_object_map(this, MachineRepresentation::kTagged);
+  var_object_map.Bind(object_map);
+  Label loop(this, &var_object_map);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* object_map = var_object_map.value();
+
+    // Check if the current {object} needs to be access checked.
+    Node* object_bitfield = LoadMapBitField(object_map);
+    GotoUnless(
+        Word32Equal(Word32And(object_bitfield,
+                              Int32Constant(1 << Map::kIsAccessCheckNeeded)),
+                    Int32Constant(0)),
+        &return_runtime);
+
+    // Check if the current {object} is a proxy.
+    Node* object_instance_type = LoadMapInstanceType(object_map);
+    GotoIf(Word32Equal(object_instance_type, Int32Constant(JS_PROXY_TYPE)),
+           &return_runtime);
+
+    // Check the current {object} prototype.
+    Node* object_prototype = LoadMapPrototype(object_map);
+    GotoIf(WordEqual(object_prototype, callable_prototype), &return_true);
+    GotoIf(WordEqual(object_prototype, NullConstant()), &return_false);
+
+    // Continue with the prototype.
+    var_object_map.Bind(LoadMap(object_prototype));
+    Goto(&loop);
+  }
+
+  Bind(&return_true);
+  StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(true));
+  var_result.Bind(BooleanConstant(true));
+  Goto(&return_result);
+
+  Bind(&return_false);
+  StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(false));
+  var_result.Bind(BooleanConstant(false));
+  Goto(&return_result);
+
+  Bind(&return_runtime);
+  {
+    // Invalidate the global instanceof cache.
+    StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, SmiConstant(0));
+    // Fallback to the runtime implementation.
+    var_result.Bind(
+        CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object));
+  }
+  Goto(&return_result);
+
+  Bind(&return_result);
+  return var_result.value();
+}
+
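Apart from the instanceof cache and the bail-outs (access checks, proxies, non-constructors), the core of the function above is the prototype walk from ES6 7.3.19. A host-side sketch with a hypothetical minimal object model (illustration only, not part of the patch):

// Hypothetical object model; nullptr terminates the prototype chain.
struct ProtoObject {
  const ProtoObject* prototype;
};

bool OrdinaryHasInstanceSketch(const ProtoObject* callable_prototype,
                               const ProtoObject* object) {
  for (const ProtoObject* p = object->prototype; p != nullptr;
       p = p->prototype) {
    if (p == callable_prototype) return true;   // return_true
  }
  return false;                                 // return_false
}
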
+compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
+                                                          ElementsKind kind,
+                                                          ParameterMode mode,
+                                                          int base_size) {
+  bool is_double = IsFastDoubleElementsKind(kind);
+  int element_size_shift = is_double ? kDoubleSizeLog2 : kPointerSizeLog2;
+  int element_size = 1 << element_size_shift;
+  int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+  int32_t index = 0;
+  bool constant_index = false;
+  if (mode == SMI_PARAMETERS) {
+    element_size_shift -= kSmiShiftBits;
+    intptr_t temp = 0;
+    constant_index = ToIntPtrConstant(index_node, temp);
+    index = temp >> kSmiShiftBits;
+  } else {
+    constant_index = ToInt32Constant(index_node, index);
+  }
+  if (constant_index) {
+    return IntPtrConstant(base_size + element_size * index);
+  }
+  if (Is64() && mode == INTEGER_PARAMETERS) {
+    index_node = ChangeInt32ToInt64(index_node);
+  }
+  if (base_size == 0) {
+    return (element_size_shift >= 0)
+               ? WordShl(index_node, IntPtrConstant(element_size_shift))
+               : WordShr(index_node, IntPtrConstant(-element_size_shift));
+  }
+  return IntPtrAdd(
+      IntPtrConstant(base_size),
+      (element_size_shift >= 0)
+          ? WordShl(index_node, IntPtrConstant(element_size_shift))
+          : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+}
+
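For a dynamic index the emitted code is base_size + (index << shift), and a Smi-encoded index simply lowers the shift, since the Smi tag/shift bits cancel against the element scale; when the net shift goes negative the WordShr arm applies. A standalone sketch, where the constants are 64-bit build assumptions (illustration only, not part of the patch):

#include <cstdint>

constexpr int kPointerSizeLog2Sketch = 3;  // assumption: 64-bit build
constexpr int kSmiShiftBitsSketch = 32;    // assumption: kSmiShiftSize + tag

// Valid for non-negative indices, where logical and arithmetic right
// shifts agree.
intptr_t ElementOffsetSketch(intptr_t index_or_smi, bool smi_mode,
                             int base_size) {
  int shift = kPointerSizeLog2Sketch;           // non-double fast elements
  if (smi_mode) shift -= kSmiShiftBitsSketch;   // untagging folded in
  return base_size + (shift >= 0 ? index_or_smi << shift
                                 : index_or_smi >> -shift);
}
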
+}  // namespace internal
+}  // namespace v8
diff --git a/src/code-stub-assembler.h b/src/code-stub-assembler.h
new file mode 100644
index 0000000..891fd24
--- /dev/null
+++ b/src/code-stub-assembler.h
@@ -0,0 +1,264 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_STUB_ASSEMBLER_H_
+#define V8_CODE_STUB_ASSEMBLER_H_
+
+#include "src/compiler/code-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class CallInterfaceDescriptor;
+
+// Provides JavaScript-specific "macro-assembler" functionality on top of the
+// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
+// it's possible to add useful JavaScript-specific CodeAssembler "macros"
+// without modifying files in the compiler directory (and requiring a review
+// from a compiler directory OWNER).
+class CodeStubAssembler : public compiler::CodeAssembler {
+ public:
+  // Create with CallStub linkage.
+  // |result_size| specifies the number of results returned by the stub.
+  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+  CodeStubAssembler(Isolate* isolate, Zone* zone,
+                    const CallInterfaceDescriptor& descriptor,
+                    Code::Flags flags, const char* name,
+                    size_t result_size = 1);
+
+  // Create with JSCall linkage.
+  CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+                    Code::Flags flags, const char* name);
+
+  enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS };
+
+  compiler::Node* BooleanMapConstant();
+  compiler::Node* EmptyStringConstant();
+  compiler::Node* HeapNumberMapConstant();
+  compiler::Node* NoContextConstant();
+  compiler::Node* NullConstant();
+  compiler::Node* UndefinedConstant();
+  compiler::Node* StaleRegisterConstant();
+
+  // Float64 operations.
+  compiler::Node* Float64Ceil(compiler::Node* x);
+  compiler::Node* Float64Floor(compiler::Node* x);
+  compiler::Node* Float64Round(compiler::Node* x);
+  compiler::Node* Float64Trunc(compiler::Node* x);
+
+  // Tag a Word as a Smi value.
+  compiler::Node* SmiTag(compiler::Node* value);
+  // Untag a Smi value as a Word.
+  compiler::Node* SmiUntag(compiler::Node* value);
+
+  // Smi conversions.
+  compiler::Node* SmiToFloat64(compiler::Node* value);
+  compiler::Node* SmiFromWord32(compiler::Node* value);
+  compiler::Node* SmiToWord(compiler::Node* value) { return SmiUntag(value); }
+  compiler::Node* SmiToWord32(compiler::Node* value);
+
+  // Smi operations.
+  compiler::Node* SmiAdd(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiAddWithOverflow(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiSubWithOverflow(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
+  compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
+
+  // Allocate an object of the given size.
+  compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
+  compiler::Node* Allocate(int size, AllocationFlags flags = kNone);
+  compiler::Node* InnerAllocate(compiler::Node* previous, int offset);
+  compiler::Node* InnerAllocate(compiler::Node* previous,
+                                compiler::Node* offset);
+
+  // Check a value for smi-ness.
+  compiler::Node* WordIsSmi(compiler::Node* a);
+  // Check that the value is a positive smi.
+  compiler::Node* WordIsPositiveSmi(compiler::Node* a);
+
+  void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
+                           Label* if_false) {
+    BranchIf(SmiLessThan(a, b), if_true, if_false);
+  }
+
+  void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
+                                  Label* if_true, Label* if_false) {
+    BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
+  }
+
+  void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
+                            Label* if_false) {
+    BranchIfFloat64Equal(value, value, if_false, if_true);
+  }
+
+  // Load an object pointer from a buffer that isn't in the heap.
+  compiler::Node* LoadBufferObject(compiler::Node* buffer, int offset,
+                                   MachineType rep = MachineType::AnyTagged());
+  // Load a field from an object on the heap.
+  compiler::Node* LoadObjectField(compiler::Node* object, int offset,
+                                  MachineType rep = MachineType::AnyTagged());
+  // Load the floating point value of a HeapNumber.
+  compiler::Node* LoadHeapNumberValue(compiler::Node* object);
+  // Load the Map of a HeapObject.
+  compiler::Node* LoadMap(compiler::Node* object);
+  // Load the instance type of a HeapObject.
+  compiler::Node* LoadInstanceType(compiler::Node* object);
+  // Load the elements backing store of a JSObject.
+  compiler::Node* LoadElements(compiler::Node* object);
+  // Load the length of a fixed array base instance.
+  compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
+  // Load the bit field of a Map.
+  compiler::Node* LoadMapBitField(compiler::Node* map);
+  // Load bit field 2 of a map.
+  compiler::Node* LoadMapBitField2(compiler::Node* map);
+  // Load bit field 3 of a map.
+  compiler::Node* LoadMapBitField3(compiler::Node* map);
+  // Load the instance type of a map.
+  compiler::Node* LoadMapInstanceType(compiler::Node* map);
+  // Load the instance descriptors of a map.
+  compiler::Node* LoadMapDescriptors(compiler::Node* map);
+  // Load the prototype of a map.
+  compiler::Node* LoadMapPrototype(compiler::Node* map);
+
+  // Load the hash field of a name.
+  compiler::Node* LoadNameHash(compiler::Node* name);
+  // Load the instance size of a Map.
+  compiler::Node* LoadMapInstanceSize(compiler::Node* map);
+
+  compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
+
+  // Load an array element from a FixedArray.
+  compiler::Node* LoadFixedArrayElement(
+      compiler::Node* object, compiler::Node* int32_index,
+      int additional_offset = 0,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+
+  // Context manipulation
+  compiler::Node* LoadNativeContext(compiler::Node* context);
+
+  compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
+                                         compiler::Node* native_context);
+
+  // Store the floating point value of a HeapNumber.
+  compiler::Node* StoreHeapNumberValue(compiler::Node* object,
+                                       compiler::Node* value);
+  // Store a field to an object on the heap.
+  compiler::Node* StoreObjectField(
+      compiler::Node* object, int offset, compiler::Node* value);
+  compiler::Node* StoreObjectFieldNoWriteBarrier(
+      compiler::Node* object, int offset, compiler::Node* value,
+      MachineRepresentation rep = MachineRepresentation::kTagged);
+  // Store the Map of a HeapObject.
+  compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
+                                         compiler::Node* map);
+  // Store an array element to a FixedArray.
+  compiler::Node* StoreFixedArrayElement(
+      compiler::Node* object, compiler::Node* index, compiler::Node* value,
+      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+
+  compiler::Node* StoreFixedDoubleArrayElement(
+      compiler::Node* object, compiler::Node* index, compiler::Node* value,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+
+  // Allocate a HeapNumber without initializing its value.
+  compiler::Node* AllocateHeapNumber();
+  // Allocate a HeapNumber with a specific value.
+  compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value);
+  // Allocate a SeqOneByteString with the given length.
+  compiler::Node* AllocateSeqOneByteString(int length);
+  // Allocate a SeqTwoByteString with the given length.
+  compiler::Node* AllocateSeqTwoByteString(int length);
+  // Allocate a JSArray.
+  compiler::Node* AllocateJSArray(ElementsKind kind, compiler::Node* array_map,
+                                  compiler::Node* capacity,
+                                  compiler::Node* length,
+                                  compiler::Node* allocation_site = nullptr,
+                                  ParameterMode mode = INTEGER_PARAMETERS);
+
+  // Allocation site manipulation
+  void InitializeAllocationMemento(compiler::Node* base_allocation,
+                                   int base_allocation_size,
+                                   compiler::Node* allocation_site);
+
+  compiler::Node* TruncateTaggedToFloat64(compiler::Node* context,
+                                          compiler::Node* value);
+  compiler::Node* TruncateTaggedToWord32(compiler::Node* context,
+                                         compiler::Node* value);
+  // Truncate the floating point value of a HeapNumber to an Int32.
+  compiler::Node* TruncateHeapNumberValueToWord32(compiler::Node* object);
+
+  // Conversions.
+  compiler::Node* ChangeFloat64ToTagged(compiler::Node* value);
+  compiler::Node* ChangeInt32ToTagged(compiler::Node* value);
+  compiler::Node* ChangeUint32ToTagged(compiler::Node* value);
+
+  // Type conversions.
+  // Throws a TypeError for {method_name} if {value} is not coercible to Object,
+  // or returns the {value} converted to a String otherwise.
+  compiler::Node* ToThisString(compiler::Node* context, compiler::Node* value,
+                               char const* method_name);
+
+  // String helpers.
+  // Load a character from a String (might flatten a ConsString).
+  compiler::Node* StringCharCodeAt(compiler::Node* string,
+                                   compiler::Node* smi_index);
+  // Return the single character string with only {code}.
+  compiler::Node* StringFromCharCode(compiler::Node* code);
+
+  // Decodes bit field T from |word32|: (word32 & T::kMask) >> T::kShift.
+  template <typename T>
+  compiler::Node* BitFieldDecode(compiler::Node* word32) {
+    return BitFieldDecode(word32, T::kShift, T::kMask);
+  }
+
+  compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
+                                 uint32_t mask);
+
+  // Various building blocks for stubs doing property lookups.
+  void TryToName(compiler::Node* key, Label* if_keyisindex, Variable* var_index,
+                 Label* if_keyisunique, Label* call_runtime);
+
+  void TryLookupProperty(compiler::Node* object, compiler::Node* map,
+                         compiler::Node* instance_type, compiler::Node* name,
+                         Label* if_found, Label* if_not_found,
+                         Label* call_runtime);
+
+  void TryLookupElement(compiler::Node* object, compiler::Node* map,
+                        compiler::Node* instance_type, compiler::Node* index,
+                        Label* if_found, Label* if_not_found,
+                        Label* call_runtime);
+
+  // Instanceof helpers.
+  // ES6 section 7.3.19 OrdinaryHasInstance (C, O)
+  compiler::Node* OrdinaryHasInstance(compiler::Node* context,
+                                      compiler::Node* callable,
+                                      compiler::Node* object);
+
+ private:
+  compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
+                                         ElementsKind kind, ParameterMode mode,
+                                         int base_size = 0);
+
+  compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
+                                     AllocationFlags flags,
+                                     compiler::Node* top_address,
+                                     compiler::Node* limit_address);
+  compiler::Node* AllocateRawUnaligned(compiler::Node* size_in_bytes,
+                                       AllocationFlags flags,
+                                       compiler::Node* top_address,
+                                       compiler::Node* limit_address);
+
+  static const int kElementLoopUnrollThreshold = 8;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODE_STUB_ASSEMBLER_H_
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 1d2fb81..6680e66 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -97,26 +97,6 @@
   HValue* BuildInternalArrayConstructor(ElementsKind kind,
                                         ArgumentClass argument_class);
 
-  // BuildCheckAndInstallOptimizedCode emits code to install the optimized
-  // function found in the optimized code map at map_index in js_function, if
-  // the function at map_index matches the given native_context. Builder is
-  // left in the "Then()" state after the install.
-  void BuildCheckAndInstallOptimizedCode(HValue* js_function,
-                                         HValue* native_context,
-                                         IfBuilder* builder,
-                                         HValue* optimized_map,
-                                         HValue* map_index);
-  void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
-                                 HValue* code_object, HValue* literals);
-  void BuildInstallCode(HValue* js_function, HValue* shared_info);
-
-  HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
-                                         HValue* iterator,
-                                         int field_offset);
-  void BuildInstallFromOptimizedCodeMap(HValue* js_function,
-                                        HValue* shared_info,
-                                        HValue* native_context);
-
   HValue* BuildToString(HValue* input, bool convert);
   HValue* BuildToPrimitive(HValue* input, HValue* input_map);
 
@@ -298,8 +278,8 @@
     timer.Start();
   }
   Zone zone(isolate->allocator());
-  CompilationInfo info(CodeStub::MajorName(stub->MajorKey()), isolate, &zone,
-                       stub->GetCodeFlags());
+  CompilationInfo info(CStrVector(CodeStub::MajorName(stub->MajorKey())),
+                       isolate, &zone, stub->GetCodeFlags());
   // Parameter count is number of stack parameters.
   int parameter_count = descriptor.GetStackParameterCount();
   if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
@@ -463,7 +443,7 @@
         JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
     HValue* result =
         Add<HAllocate>(Add<HConstant>(result_size), HType::JSObject(),
-                       NOT_TENURED, JS_REGEXP_TYPE);
+                       NOT_TENURED, JS_REGEXP_TYPE, graph()->GetConstant0());
     Add<HStoreNamedField>(
         result, HObjectAccess::ForMap(),
         Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap()));
@@ -570,87 +550,15 @@
 
 
 template <>
-HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
-  HValue* undefined = graph()->GetConstantUndefined();
-  HValue* closure = GetParameter(0);
-  HValue* literal_index = GetParameter(1);
-
-  HValue* literals_array = Add<HLoadNamedField>(
-      closure, nullptr, HObjectAccess::ForLiteralsPointer());
-
-  HInstruction* allocation_site = Add<HLoadKeyed>(
-      literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
-      NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
-
-  IfBuilder checker(this);
-  checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
-                                                    undefined);
-  checker.And();
-
-  HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
-      AllocationSite::kTransitionInfoOffset);
-  HInstruction* boilerplate =
-      Add<HLoadNamedField>(allocation_site, nullptr, access);
-
-  int length = casted_stub()->length();
-  if (length == 0) {
-    // Empty objects have some slack added to them.
-    length = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
-  }
-  int size = JSObject::kHeaderSize + length * kPointerSize;
-  int object_size = size;
-  if (FLAG_allocation_site_pretenuring) {
-    size += AllocationMemento::kSize;
-  }
-
-  HValue* boilerplate_map =
-      Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap());
-  HValue* boilerplate_size = Add<HLoadNamedField>(
-      boilerplate_map, nullptr, HObjectAccess::ForMapInstanceSize());
-  HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
-  checker.If<HCompareNumericAndBranch>(boilerplate_size,
-                                       size_in_words, Token::EQ);
-  checker.Then();
-
-  HValue* size_in_bytes = Add<HConstant>(size);
-
-  HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
-      NOT_TENURED, JS_OBJECT_TYPE);
-
-  for (int i = 0; i < object_size; i += kPointerSize) {
-    HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(i);
-    Add<HStoreNamedField>(object, access,
-                          Add<HLoadNamedField>(boilerplate, nullptr, access));
-  }
-
-  DCHECK(FLAG_allocation_site_pretenuring || (size == object_size));
-  if (FLAG_allocation_site_pretenuring) {
-    BuildCreateAllocationMemento(
-        object, Add<HConstant>(object_size), allocation_site);
-  }
-
-  environment()->Push(object);
-  checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
-  checker.End();
-
-  return environment()->Pop();
-}
-
-
-Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
 HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
   // This stub is performance sensitive, the generated code must be tuned
   // so that it doesn't build an eager frame.
   info()->MarkMustNotHaveEagerFrame();
 
   HValue* size = Add<HConstant>(AllocationSite::kSize);
-  HInstruction* object = Add<HAllocate>(size, HType::JSObject(), TENURED,
-      JS_OBJECT_TYPE);
+  HInstruction* object =
+      Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE,
+                     graph()->GetConstant0());
 
   // Store the map
   Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
@@ -728,7 +636,8 @@
 
   HValue* size = Add<HConstant>(WeakCell::kSize);
   HInstruction* object =
-      Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE);
+      Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE,
+                     graph()->GetConstant0());
 
   Handle<Map> weak_cell_map = isolate()->factory()->weak_cell_map();
   AddStoreMapConstant(object, weak_cell_map);
@@ -856,18 +765,6 @@
     check.End();
   }
 
-  // Disallow pushing onto observed objects.
-  {
-    HValue* bit_field =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
-    HValue* mask = Add<HConstant>(1 << Map::kIsObserved);
-    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
-    IfBuilder check(this);
-    check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
-    check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
-    check.End();
-  }
-
   // Disallow pushing onto arrays in dictionary named property mode. We need to
   // figure out whether the length property is still writable.
   {
@@ -1272,7 +1169,7 @@
         // TODO(hpayer): Allocation site pretenuring support.
         HInstruction* heap_number =
             Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
-                           MUTABLE_HEAP_NUMBER_TYPE);
+                           MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
         AddStoreMapConstant(heap_number,
                             isolate()->factory()->mutable_heap_number_map());
         Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@@ -1394,20 +1291,6 @@
   return DoGenerateCode(this);
 }
 
-
-template <>
-HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
-  HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
-                                  JS_OBJECT_TYPE);
-  return result;
-}
-
-
-Handle<Code> AllocateInNewSpaceStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
 HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
     ElementsKind kind,
     AllocationSiteOverrideMode override_mode,
@@ -1498,7 +1381,6 @@
       ? JSArrayBuilder::FILL_WITH_HOLE
       : JSArrayBuilder::DONT_FILL_WITH_HOLE;
   HValue* new_object = array_builder->AllocateArray(checked_length,
-                                                    max_alloc_length,
                                                     checked_length,
                                                     fill_mode);
   HValue* elements = array_builder->GetElementsLocation();
@@ -1527,12 +1409,6 @@
   return BuildArrayConstructor(kind, override_mode, NONE);
 }
 
-
-Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
 template <>
 HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
     BuildCodeStub() {
@@ -1568,11 +1444,6 @@
 }
 
 
-Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
 template <>
 HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
     BuildCodeStub() {
@@ -1746,9 +1617,9 @@
       // Convert the primitive to a string value.
       ToStringStub stub(isolate());
       HValue* values[] = {context(), Pop()};
-      Push(AddUncasted<HCallWithDescriptor>(
-          Add<HConstant>(stub.GetCode()), 0, stub.GetCallInterfaceDescriptor(),
-          Vector<HValue*>(values, arraysize(values))));
+      Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(stub.GetCode()), 0,
+                                            stub.GetCallInterfaceDescriptor(),
+                                            ArrayVector(values)));
     }
     if_inputisstring.End();
   }
@@ -2013,182 +1884,6 @@
 Handle<Code> ToObjectStub::GenerateCode() { return DoGenerateCode(this); }
 
 
-void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
-    HValue* js_function,
-    HValue* native_context,
-    IfBuilder* builder,
-    HValue* optimized_map,
-    HValue* map_index) {
-  HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
-  HValue* context_slot = LoadFromOptimizedCodeMap(
-      optimized_map, map_index, SharedFunctionInfo::kContextOffset);
-  context_slot = Add<HLoadNamedField>(context_slot, nullptr,
-                                      HObjectAccess::ForWeakCellValue());
-  HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
-      optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
-  HValue* code_object = LoadFromOptimizedCodeMap(
-      optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
-  code_object = Add<HLoadNamedField>(code_object, nullptr,
-                                     HObjectAccess::ForWeakCellValue());
-  builder->If<HCompareObjectEqAndBranch>(native_context,
-                                         context_slot);
-  builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
-  builder->And();
-  builder->IfNot<HCompareObjectEqAndBranch>(code_object,
-                                            graph()->GetConstant0());
-  builder->Then();
-  HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
-      map_index, SharedFunctionInfo::kLiteralsOffset);
-  literals = Add<HLoadNamedField>(literals, nullptr,
-                                  HObjectAccess::ForWeakCellValue());
-  IfBuilder maybe_deopt(this);
-  maybe_deopt.If<HCompareObjectEqAndBranch>(literals, graph()->GetConstant0());
-  maybe_deopt.ThenDeopt(Deoptimizer::kLiteralsWereDisposed);
-  maybe_deopt.End();
-
-  BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
-
-  // The builder continues in the "then" after this function.
-}
-
-
-void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(HValue* js_function,
-                                                         HValue* native_context,
-                                                         HValue* code_object,
-                                                         HValue* literals) {
-  Counters* counters = isolate()->counters();
-  AddIncrementCounter(counters->fast_new_closure_install_optimized());
-
-  // TODO(fschneider): Idea: store proper code pointers in the optimized code
-  // map and either unmangle them on marking or do nothing as the whole map is
-  // discarded on major GC anyway.
-  Add<HStoreCodeEntry>(js_function, code_object);
-  Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
-                        literals);
-
-  // Now link a function into a list of optimized functions.
-  HValue* optimized_functions_list = Add<HLoadNamedField>(
-      native_context, nullptr,
-      HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
-  Add<HStoreNamedField>(js_function,
-                        HObjectAccess::ForNextFunctionLinkPointer(),
-                        optimized_functions_list);
-
-  // This store is the only one that should have a write barrier.
-  Add<HStoreNamedField>(native_context,
-           HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
-           js_function);
-}
-
-
-void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
-                                                HValue* shared_info) {
-  Add<HStoreNamedField>(js_function,
-                        HObjectAccess::ForNextFunctionLinkPointer(),
-                        graph()->GetConstantUndefined());
-  HValue* code_object = Add<HLoadNamedField>(shared_info, nullptr,
-                                             HObjectAccess::ForCodeOffset());
-  Add<HStoreCodeEntry>(js_function, code_object);
-}
-
-
-HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
-    HValue* optimized_map,
-    HValue* iterator,
-    int field_offset) {
-  // By making sure to express these loads in the form [<hvalue> + constant]
-  // the keyed load can be hoisted.
-  DCHECK(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
-  HValue* field_slot = iterator;
-  if (field_offset > 0) {
-    HValue* field_offset_value = Add<HConstant>(field_offset);
-    field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
-  }
-  HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
-                                              nullptr, nullptr, FAST_ELEMENTS);
-  return field_entry;
-}
-
-
-void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
-    HValue* js_function,
-    HValue* shared_info,
-    HValue* native_context) {
-  Counters* counters = isolate()->counters();
-  Factory* factory = isolate()->factory();
-  IfBuilder is_optimized(this);
-  HInstruction* optimized_map = Add<HLoadNamedField>(
-      shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
-  HValue* null_constant = Add<HConstant>(0);
-  is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
-  is_optimized.Then();
-  {
-    BuildInstallCode(js_function, shared_info);
-  }
-  is_optimized.Else();
-  {
-    AddIncrementCounter(counters->fast_new_closure_try_optimized());
-    // The {optimized_map} points to fixed array of 4-element entries:
-    //   (native context, optimized code, literals, ast-id).
-    // Iterate through the {optimized_map} backwards. After the loop, if no
-    // matching optimized code was found, install unoptimized code.
-    //   for(i = map.length() - SharedFunctionInfo::kEntryLength;
-    //       i >= SharedFunctionInfo::kEntriesStart;
-    //       i -= SharedFunctionInfo::kEntryLength) { ... }
-    HValue* first_entry_index =
-        Add<HConstant>(SharedFunctionInfo::kEntriesStart);
-    HValue* shared_function_entry_length =
-        Add<HConstant>(SharedFunctionInfo::kEntryLength);
-    LoopBuilder loop_builder(this, context(), LoopBuilder::kPostDecrement,
-                             shared_function_entry_length);
-    HValue* array_length = Add<HLoadNamedField>(
-        optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
-    HValue* start_pos =
-        AddUncasted<HSub>(array_length, shared_function_entry_length);
-    HValue* slot_iterator =
-        loop_builder.BeginBody(start_pos, first_entry_index, Token::GTE);
-    {
-      IfBuilder done_check(this);
-      BuildCheckAndInstallOptimizedCode(js_function, native_context,
-                                        &done_check, optimized_map,
-                                        slot_iterator);
-      // Fall out of the loop
-      loop_builder.Break();
-    }
-    loop_builder.EndBody();
-
-    // If {slot_iterator} is less than the first entry index, then we failed to
-    // find a context-dependent code and try context-independent code next.
-    IfBuilder no_optimized_code_check(this);
-    no_optimized_code_check.If<HCompareNumericAndBranch>(
-        slot_iterator, first_entry_index, Token::LT);
-    no_optimized_code_check.Then();
-    {
-      IfBuilder shared_code_check(this);
-      HValue* shared_code =
-          Add<HLoadNamedField>(optimized_map, nullptr,
-                               HObjectAccess::ForOptimizedCodeMapSharedCode());
-      shared_code = Add<HLoadNamedField>(shared_code, nullptr,
-                                         HObjectAccess::ForWeakCellValue());
-      shared_code_check.IfNot<HCompareObjectEqAndBranch>(
-          shared_code, graph()->GetConstant0());
-      shared_code_check.Then();
-      {
-        // Store the context-independent optimized code.
-        HValue* literals = Add<HConstant>(factory->empty_fixed_array());
-        BuildInstallOptimizedCode(js_function, native_context, shared_code,
-                                  literals);
-      }
-      shared_code_check.Else();
-      {
-        // Store the unoptimized code.
-        BuildInstallCode(js_function, shared_info);
-      }
-    }
-  }
-}
-
-
 template<>
 HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
   Counters* counters = isolate()->counters();
@@ -2202,7 +1897,8 @@
   // Create a new closure from the given function info in new space
   HValue* size = Add<HConstant>(JSFunction::kSize);
   HInstruction* js_function =
-      Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE);
+      Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE,
+                     graph()->GetConstant0());
 
   int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
                                             casted_stub()->kind());
@@ -2228,10 +1924,13 @@
   Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
                         context());
 
-  // Initialize the code pointer in the function to be the one found in the
-  // shared function info object. But first check if there is an optimized
-  // version for our context.
-  BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
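+  // The new closure always starts out with the CompileLazy builtin as its
+  // code; optimized code, if any, is only installed later, on first call.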
+  Handle<Code> lazy_builtin(
+      isolate()->builtins()->builtin(Builtins::kCompileLazy));
+  HConstant* lazy = Add<HConstant>(lazy_builtin);
+  Add<HStoreCodeEntry>(js_function, lazy);
+  Add<HStoreNamedField>(js_function,
+                        HObjectAccess::ForNextFunctionLinkPointer(),
+                        graph()->GetConstantUndefined());
 
   return js_function;
 }
@@ -2252,7 +1951,8 @@
   // Allocate the context in new space.
   HAllocate* function_context = Add<HAllocate>(
       Add<HConstant>(length * kPointerSize + FixedArray::kHeaderSize),
-      HType::HeapObject(), NOT_TENURED, FIXED_ARRAY_TYPE);
+      HType::HeapObject(), NOT_TENURED, FIXED_ARRAY_TYPE,
+      graph()->GetConstant0());
 
   // Set up the object header.
   AddStoreMapConstant(function_context,
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 60b350c..d7ea506 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -8,7 +8,7 @@
 
 #include "src/bootstrapper.h"
 #include "src/code-factory.h"
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
 #include "src/factory.h"
 #include "src/gdb-jit.h"
 #include "src/ic/handler-compiler.h"
@@ -99,8 +99,7 @@
 
 
 Code::Flags CodeStub::GetCodeFlags() const {
-  return Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState(),
-                            GetStubType());
+  return Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState());
 }
 
 
@@ -135,11 +134,8 @@
   CodeDesc desc;
   masm.GetCode(&desc);
   // Copy the generated code into a heap object.
-  Code::Flags flags = Code::ComputeFlags(
-      GetCodeKind(),
-      GetICState(),
-      GetExtraICState(),
-      GetStubType());
+  Code::Flags flags =
+      Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState());
   Handle<Code> new_object = factory->NewCode(
       desc, flags, masm.CodeObject(), NeedsImmovableCode());
   return new_object;
@@ -390,19 +386,14 @@
 
 
 bool CompareICStub::FindCodeInSpecialCache(Code** code_out) {
-  Factory* factory = isolate()->factory();
   Code::Flags flags = Code::ComputeFlags(
       GetCodeKind(),
       UNINITIALIZED);
-  Handle<Object> probe(
-      known_map_->FindInCodeCache(
-        strict() ?
-            *factory->strict_compare_ic_string() :
-            *factory->compare_ic_string(),
-        flags),
-      isolate());
-  if (probe->IsCode()) {
-    *code_out = Code::cast(*probe);
+  Name* name = strict() ? isolate()->heap()->strict_compare_ic_string()
+                        : isolate()->heap()->compare_ic_string();
+  Code* code = known_map_->LookupInCodeCache(name, flags);
+  if (code != nullptr) {
+    *code_out = code;
 #ifdef DEBUG
     CompareICStub decode((*code_out)->stub_key(), isolate());
     DCHECK(op() == decode.op());
@@ -457,36 +448,25 @@
   const char* name = CodeStub::MajorName(MajorKey());
   Zone zone(isolate()->allocator());
   CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
-  compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
-                                        GetCodeFlags(), name);
+  CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
+                              name);
   GenerateAssembly(&assembler);
   return assembler.GenerateCode();
 }
 
 void AllocateHeapNumberStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+    CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
 
   Node* result = assembler->AllocateHeapNumber();
   assembler->Return(result);
 }
 
-void AllocateMutableHeapNumberStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* result = assembler->Allocate(HeapNumber::kSize);
-  assembler->StoreMapNoWriteBarrier(
-      result,
-      assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map()));
-  assembler->Return(result);
-}
-
 #define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type)            \
-  void Allocate##Type##Stub::GenerateAssembly(                              \
-      compiler::CodeStubAssembler* assembler) const {                       \
-    compiler::Node* result = assembler->Allocate(                           \
-        Simd128Value::kSize, compiler::CodeStubAssembler::kNone);           \
+  void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
+      const {                                                               \
+    compiler::Node* result =                                                \
+        assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
     compiler::Node* map_offset =                                            \
         assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag); \
     compiler::Node* map = assembler->IntPtrAdd(result, map_offset);         \
@@ -498,8 +478,7 @@
 SIMD128_TYPES(SIMD128_GEN_ASM)
 #undef SIMD128_GEN_ASM
 
-void StringLengthStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   compiler::Node* value = assembler->Parameter(0);
   compiler::Node* string =
       assembler->LoadObjectField(value, JSValue::kValueOffset);
@@ -508,12 +487,13 @@
   assembler->Return(result);
 }
 
-void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
-  typedef compiler::CodeStubAssembler::Label Label;
+// static
+compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
+                                  compiler::Node* left, compiler::Node* right,
+                                  compiler::Node* context) {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
-
-  Node* context = assembler->Parameter(2);
+  typedef CodeStubAssembler::Variable Variable;
 
   // Shared entry for floating point addition.
   Label do_fadd(assembler);
@@ -523,11 +503,14 @@
   // We might need to loop several times due to ToPrimitive, ToString and/or
   // ToNumber conversions.
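   // (e.g. ({}) + 1 first converts the object via ToPrimitive/ToString and
   // then takes the string concatenation path below).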
   Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged);
+      var_rhs(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_vars);
-  var_lhs.Bind(assembler->Parameter(0));
-  var_rhs.Bind(assembler->Parameter(1));
+  Label loop(assembler, 2, loop_vars), end(assembler),
+      string_add_convert_left(assembler, Label::kDeferred),
+      string_add_convert_right(assembler, Label::kDeferred);
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
   assembler->Goto(&loop);
   assembler->Bind(&loop);
   {
@@ -564,7 +547,8 @@
         }
 
         assembler->Bind(&if_notoverflow);
-        assembler->Return(assembler->Projection(0, pair));
+        var_result.Bind(assembler->Projection(0, pair));
+        assembler->Goto(&end);
       }
 
       assembler->Bind(&if_rhsisnotsmi);
@@ -601,11 +585,9 @@
 
           assembler->Bind(&if_rhsisstring);
           {
-            // Convert {lhs}, which is a Smi, to a String and concatenate the
-            // resulting string with the String {rhs}.
-            Callable callable = CodeFactory::StringAdd(
-                assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
-            assembler->TailCallStub(callable, context, lhs, rhs);
+            var_lhs.Bind(lhs);
+            var_rhs.Bind(rhs);
+            assembler->Goto(&string_add_convert_left);
           }
 
           assembler->Bind(&if_rhsisnotstring);
@@ -655,11 +637,9 @@
 
       assembler->Bind(&if_lhsisstring);
       {
-        // Convert {rhs} to a String (using the sequence of ToPrimitive with
-        // no hint followed by ToString) and concatenate the strings.
-        Callable callable = CodeFactory::StringAdd(
-            assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
-        assembler->TailCallStub(callable, context, lhs, rhs);
+        var_lhs.Bind(lhs);
+        var_rhs.Bind(rhs);
+        assembler->Goto(&string_add_convert_right);
       }
 
       assembler->Bind(&if_lhsisnotstring);
@@ -733,11 +713,9 @@
 
           assembler->Bind(&if_rhsisstring);
           {
-            // Convert {lhs} to a String (using the sequence of ToPrimitive with
-            // no hint followed by ToString) and concatenate the strings.
-            Callable callable = CodeFactory::StringAdd(
-                assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
-            assembler->TailCallStub(callable, context, lhs, rhs);
+            var_lhs.Bind(lhs);
+            var_rhs.Bind(rhs);
+            assembler->Goto(&string_add_convert_left);
           }
 
           assembler->Bind(&if_rhsisnotstring);
@@ -853,6 +831,27 @@
       }
     }
   }
+  assembler->Bind(&string_add_convert_left);
+  {
+    // Convert {lhs} to a String and concatenate the resulting string with
+    // the String {rhs}.
+    Callable callable = CodeFactory::StringAdd(
+        assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+                                        var_rhs.value()));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&string_add_convert_right);
+  {
+    // Convert {rhs} to a String and concatenate the String {lhs} with the
+    // resulting string.
+    Callable callable = CodeFactory::StringAdd(
+        assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+                                        var_rhs.value()));
+    assembler->Goto(&end);
+  }
 
   assembler->Bind(&do_fadd);
   {
@@ -860,31 +859,36 @@
     Node* rhs_value = var_fadd_rhs.value();
     Node* value = assembler->Float64Add(lhs_value, rhs_value);
     Node* result = assembler->ChangeFloat64ToTagged(value);
-    assembler->Return(result);
+    var_result.Bind(result);
+    assembler->Goto(&end);
   }
+  assembler->Bind(&end);
+  return var_result.value();
 }
 
-void SubtractStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
-  typedef compiler::CodeStubAssembler::Label Label;
+// static
+compiler::Node* SubtractStub::Generate(CodeStubAssembler* assembler,
+                                       compiler::Node* left,
+                                       compiler::Node* right,
+                                       compiler::Node* context) {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
-
-  Node* context = assembler->Parameter(2);
+  typedef CodeStubAssembler::Variable Variable;
 
   // Shared entry for floating point subtraction.
-  Label do_fsub(assembler);
+  Label do_fsub(assembler), end(assembler);
   Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
       var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
 
   // We might need to loop several times due to ToPrimitive and/or ToNumber
   // conversions.
   Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged);
+      var_rhs(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
   Label loop(assembler, 2, loop_vars);
-  var_lhs.Bind(assembler->Parameter(0));
-  var_rhs.Bind(assembler->Parameter(1));
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
   assembler->Goto(&loop);
   assembler->Bind(&loop);
   {
@@ -922,7 +926,8 @@
         }
 
         assembler->Bind(&if_notoverflow);
-        assembler->Return(assembler->Projection(0, pair));
+        var_result.Bind(assembler->Projection(0, pair));
+        assembler->Goto(&end);
       }
 
       assembler->Bind(&if_rhsisnotsmi);
@@ -948,7 +953,8 @@
         assembler->Bind(&if_rhsisnotnumber);
         {
           // Convert the {rhs} to a Number first.
-          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
           var_rhs.Bind(assembler->CallStub(callable, context, rhs));
           assembler->Goto(&loop);
         }
@@ -1004,7 +1010,8 @@
           assembler->Bind(&if_rhsisnotnumber);
           {
             // Convert the {rhs} to a Number first.
-            Callable callable = CodeFactory::NonNumberToNumber(isolate());
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
             var_rhs.Bind(assembler->CallStub(callable, context, rhs));
             assembler->Goto(&loop);
           }
@@ -1014,7 +1021,8 @@
       assembler->Bind(&if_lhsisnotnumber);
       {
         // Convert the {lhs} to a Number first.
-        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
         var_lhs.Bind(assembler->CallStub(callable, context, lhs));
         assembler->Goto(&loop);
       }
@@ -1026,51 +1034,834 @@
     Node* lhs_value = var_fsub_lhs.value();
     Node* rhs_value = var_fsub_rhs.value();
     Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+    assembler->Goto(&end);
+  }
+  assembler->Bind(&end);
+  return var_result.value();
+}
+
+// static
+compiler::Node* MultiplyStub::Generate(CodeStubAssembler* assembler,
+                                       compiler::Node* left,
+                                       compiler::Node* right,
+                                       compiler::Node* context) {
+  using compiler::Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  // Shared entry point for floating point multiplication.
+  Label do_fmul(assembler);
+  Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
+      var_rhs_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_variables);
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
+    assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+
+    assembler->Bind(&lhs_is_smi);
+    {
+      Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+      assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
+                        &rhs_is_not_smi);
+
+      assembler->Bind(&rhs_is_smi);
+      {
+        // Both {lhs} and {rhs} are Smis. Convert them to double and multiply.
+        // TODO(epertoso): use SmiMulWithOverflow once available.
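+        // Multiplying as doubles also covers Smi overflow and the -0 result
+        // (e.g. -1 * 0 is -0, which cannot be represented as a Smi).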
+        var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+        var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+        assembler->Goto(&do_fmul);
+      }
+
+      assembler->Bind(&rhs_is_not_smi);
+      {
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if {rhs} is a HeapNumber.
+        Label rhs_is_number(assembler),
+            rhs_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                          &rhs_is_number, &rhs_is_not_number);
+
+        assembler->Bind(&rhs_is_number);
+        {
+          // Convert {lhs} to a double and multiply it with the value of {rhs}.
+          var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+          var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fmul);
+        }
+
+        assembler->Bind(&rhs_is_not_number);
+        {
+          // Multiplication is commutative; swap {lhs} with {rhs} and loop.
+          var_lhs.Bind(rhs);
+          var_rhs.Bind(lhs);
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&lhs_is_not_smi);
+    {
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if {lhs} is a HeapNumber.
+      Label lhs_is_number(assembler),
+          lhs_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                        &lhs_is_number, &lhs_is_not_number);
+
+      assembler->Bind(&lhs_is_number);
+      {
+        // Check if {rhs} is a Smi.
+        Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+        assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
+                          &rhs_is_not_smi);
+
+        assembler->Bind(&rhs_is_smi);
+        {
+          // Convert {rhs} to a double and multiply it with the value of {lhs}.
+          var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+          var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fmul);
+        }
+
+        assembler->Bind(&rhs_is_not_smi);
+        {
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if {rhs} is a HeapNumber.
+          Label rhs_is_number(assembler),
+              rhs_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &rhs_is_number, &rhs_is_not_number);
+
+          assembler->Bind(&rhs_is_number);
+          {
+            // Both {lhs} and {rhs} are HeapNumbers. Load their values and
+            // multiply them.
+            var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fmul);
+          }
+
+          assembler->Bind(&rhs_is_not_number);
+          {
+            // Multiplication is commutative; swap {lhs} with {rhs} and loop.
+            var_lhs.Bind(rhs);
+            var_rhs.Bind(lhs);
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&lhs_is_not_number);
+      {
+        // Convert {lhs} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fmul);
+  {
+    Node* value =
+        assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
     Node* result = assembler->ChangeFloat64ToTagged(value);
-    assembler->Return(result);
+    return result;
   }
 }
 
-void BitwiseAndStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* DivideStub::Generate(CodeStubAssembler* assembler,
+                                     compiler::Node* left,
+                                     compiler::Node* right,
+                                     compiler::Node* context) {
+  using compiler::Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  // Shared entry point for floating point division.
+  Label do_fdiv(assembler), end(assembler);
+  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_dividend(assembler, MachineRepresentation::kTagged),
+      var_divisor(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_dividend, &var_divisor};
+  Label loop(assembler, 2, loop_variables);
+  var_dividend.Bind(left);
+  var_divisor.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* dividend = var_dividend.value();
+    Node* divisor = var_divisor.value();
+
+    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+    assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+                      &dividend_is_not_smi);
+
+    assembler->Bind(&dividend_is_smi);
+    {
+      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+      assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+                        &divisor_is_not_smi);
+
+      assembler->Bind(&divisor_is_smi);
+      {
+        Label bailout(assembler);
+
+        // Do floating point division if {divisor} is zero.
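+        // (e.g. 1 / 0 is Infinity, which is not representable as a Smi).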
+        assembler->GotoIf(
+            assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
+            &bailout);
+
+        // Do floating point division if {dividend} is zero and {divisor} is
+        // negative, since the result would be -0, which is not a Smi.
+        Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
+        assembler->Branch(
+            assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+            &dividend_is_zero, &dividend_is_not_zero);
+
+        assembler->Bind(&dividend_is_zero);
+        {
+          assembler->GotoIf(
+              assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+              &bailout);
+          assembler->Goto(&dividend_is_not_zero);
+        }
+        assembler->Bind(&dividend_is_not_zero);
+
+        Node* untagged_divisor = assembler->SmiUntag(divisor);
+        Node* untagged_dividend = assembler->SmiUntag(dividend);
+
+        // Do floating point division if {dividend} is kMinInt (or
+        // kMinInt >> 1 if the Smi size is 31) and {divisor} is -1.
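+        // (e.g. -2147483648 / -1 is 2147483648, which is out of range).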
+        Label divisor_is_minus_one(assembler),
+            divisor_is_not_minus_one(assembler);
+        assembler->Branch(assembler->Word32Equal(untagged_divisor,
+                                                 assembler->Int32Constant(-1)),
+                          &divisor_is_minus_one, &divisor_is_not_minus_one);
+
+        assembler->Bind(&divisor_is_minus_one);
+        {
+          assembler->GotoIf(
+              assembler->Word32Equal(
+                  untagged_dividend,
+                  assembler->Int32Constant(
+                      kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+              &bailout);
+          assembler->Goto(&divisor_is_not_minus_one);
+        }
+        assembler->Bind(&divisor_is_not_minus_one);
+
+        // TODO(epertoso): consider adding a machine instruction that returns
+        // both the result and the remainder.
+        Node* untagged_result =
+            assembler->Int32Div(untagged_dividend, untagged_divisor);
+        Node* truncated =
+            assembler->IntPtrMul(untagged_result, untagged_divisor);
+        // Do floating point division if the remainder is not 0.
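+        // (e.g. 7 / 2 truncates to 3 and 3 * 2 != 7, so bail out to produce
+        // the double result 3.5).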
+        assembler->GotoIf(
+            assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
+        var_result.Bind(assembler->SmiTag(untagged_result));
+        assembler->Goto(&end);
+
+        // Bailout: convert {dividend} and {divisor} to double and do double
+        // division.
+        assembler->Bind(&bailout);
+        {
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+      }
+
+      assembler->Bind(&divisor_is_not_smi);
+      {
+        Node* divisor_map = assembler->LoadMap(divisor);
+
+        // Check if {divisor} is a HeapNumber.
+        Label divisor_is_number(assembler),
+            divisor_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                          &divisor_is_number, &divisor_is_not_number);
+
+        assembler->Bind(&divisor_is_number);
+        {
+          // Convert {dividend} to a double and divide it with the value of
+          // {divisor}.
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+
+        assembler->Bind(&divisor_is_not_number);
+        {
+          // Convert {divisor} to a number and loop.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&dividend_is_not_smi);
+    {
+      Node* dividend_map = assembler->LoadMap(dividend);
+
+      // Check if {dividend} is a HeapNumber.
+      Label dividend_is_number(assembler),
+          dividend_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+                        &dividend_is_number, &dividend_is_not_number);
+
+      assembler->Bind(&dividend_is_number);
+      {
+        // Check if {divisor} is a Smi.
+        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+        assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+                          &divisor_is_not_smi);
+
+        assembler->Bind(&divisor_is_smi);
+        {
+          // Convert {divisor} to a double and use it for a floating point
+          // division.
+          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+
+        assembler->Bind(&divisor_is_not_smi);
+        {
+          Node* divisor_map = assembler->LoadMap(divisor);
+
+          // Check if {divisor} is a HeapNumber.
+          Label divisor_is_number(assembler),
+              divisor_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                            &divisor_is_number, &divisor_is_not_number);
+
+          assembler->Bind(&divisor_is_number);
+          {
+            // Both {dividend} and {divisor} are HeapNumbers. Load their values
+            // and divide them.
+            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+            assembler->Goto(&do_fdiv);
+          }
+
+          assembler->Bind(&divisor_is_not_number);
+          {
+            // Convert {divisor} to a number and loop.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&dividend_is_not_number);
+      {
+        // Convert {dividend} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fdiv);
+  {
+    Node* value = assembler->Float64Div(var_dividend_float64.value(),
+                                        var_divisor_float64.value());
+    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+    assembler->Goto(&end);
+  }
+  assembler->Bind(&end);
+  return var_result.value();
+}
+
+// static
+compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
+                                      compiler::Node* left,
+                                      compiler::Node* right,
+                                      compiler::Node* context) {
+  using compiler::Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  // Shared entry point for floating point modulus.
+  Label do_fmod(assembler);
+  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_dividend(assembler, MachineRepresentation::kTagged),
+      var_divisor(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_dividend, &var_divisor};
+  Label loop(assembler, 2, loop_variables);
+  var_dividend.Bind(left);
+  var_divisor.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* dividend = var_dividend.value();
+    Node* divisor = var_divisor.value();
+
+    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+    assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+                      &dividend_is_not_smi);
+
+    assembler->Bind(&dividend_is_smi);
+    {
+      Label dividend_is_not_zero(assembler);
+      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+      assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+                        &divisor_is_not_smi);
+
+      assembler->Bind(&divisor_is_smi);
+      {
+        var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+        var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+        assembler->Goto(&do_fmod);
+      }
+
+      assembler->Bind(&divisor_is_not_smi);
+      {
+        Node* divisor_map = assembler->LoadMap(divisor);
+
+        // Check if {divisor} is a HeapNumber.
+        Label divisor_is_number(assembler),
+            divisor_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                          &divisor_is_number, &divisor_is_not_number);
+
+        assembler->Bind(&divisor_is_number);
+        {
+          // Convert {dividend} to a double and compute its modulus with the
+          // value of {divisor}.
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+          assembler->Goto(&do_fmod);
+        }
+
+        assembler->Bind(&divisor_is_not_number);
+        {
+          // Convert {divisor} to a number and loop.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&dividend_is_not_smi);
+    {
+      Node* dividend_map = assembler->LoadMap(dividend);
+
+      // Check if {dividend} is a HeapNumber.
+      Label dividend_is_number(assembler),
+          dividend_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+                        &dividend_is_number, &dividend_is_not_number);
+
+      assembler->Bind(&dividend_is_number);
+      {
+        // Check if {divisor} is a Smi.
+        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+        assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+                          &divisor_is_not_smi);
+
+        assembler->Bind(&divisor_is_smi);
+        {
+          // Convert {divisor} to a double and compute {dividend}'s modulus with
+          // it.
+          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fmod);
+        }
+
+        assembler->Bind(&divisor_is_not_smi);
+        {
+          Node* divisor_map = assembler->LoadMap(divisor);
+
+          // Check if {divisor} is a HeapNumber.
+          Label divisor_is_number(assembler),
+              divisor_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                            &divisor_is_number, &divisor_is_not_number);
+
+          assembler->Bind(&divisor_is_number);
+          {
+            // Both {dividend} and {divisor} are HeapNumbers. Load their values
+            // and compute their modulus.
+            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+            assembler->Goto(&do_fmod);
+          }
+
+          assembler->Bind(&divisor_is_not_number);
+          {
+            // Convert {divisor} to a number and loop.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&dividend_is_not_number);
+      {
+        // Convert {dividend} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fmod);
+  {
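+    // JS '%' takes the sign of the dividend, e.g. -5 % 2 is -1; Float64Mod
+    // implements exactly these semantics.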
+    Node* value = assembler->Float64Mod(var_dividend_float64.value(),
+                                        var_divisor_float64.value());
+    Node* result = assembler->ChangeFloat64ToTagged(value);
+    return result;
+  }
+}
+
+// static
+compiler::Node* ShiftLeftStub::Generate(CodeStubAssembler* assembler,
+                                        compiler::Node* left,
+                                        compiler::Node* right,
+                                        compiler::Node* context) {
   using compiler::Node;
 
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
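+  // Per the ES6 semantics of '<<', only the five least significant bits of
+  // the right operand are used as the shift count, e.g. (1 << 33) yields 2.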
+  Node* shift_count =
+      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+  Node* value = assembler->Word32Shl(lhs_value, shift_count);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  return result;
+}
+
+// static
+compiler::Node* ShiftRightStub::Generate(CodeStubAssembler* assembler,
+                                         compiler::Node* left,
+                                         compiler::Node* right,
+                                         compiler::Node* context) {
+  using compiler::Node;
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+  Node* shift_count =
+      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+  Node* value = assembler->Word32Sar(lhs_value, shift_count);
+  Node* result = assembler->ChangeInt32ToTagged(value);
+  return result;
+}
+
+// static
+compiler::Node* ShiftRightLogicalStub::Generate(CodeStubAssembler* assembler,
+                                                compiler::Node* left,
+                                                compiler::Node* right,
+                                                compiler::Node* context) {
+  using compiler::Node;
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+  Node* shift_count =
+      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+  Node* value = assembler->Word32Shr(lhs_value, shift_count);
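+  // '>>>' yields an unsigned 32-bit result, e.g. -1 >>> 0 is 4294967295,
+  // hence ChangeUint32ToTagged rather than ChangeInt32ToTagged.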
+  Node* result = assembler->ChangeUint32ToTagged(value);
+  return result;
+}
+
+// static
+compiler::Node* BitwiseAndStub::Generate(CodeStubAssembler* assembler,
+                                         compiler::Node* left,
+                                         compiler::Node* right,
+                                         compiler::Node* context) {
+  using compiler::Node;
+
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
   Node* value = assembler->Word32And(lhs_value, rhs_value);
   Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+  return result;
 }
 
-void BitwiseOrStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* BitwiseOrStub::Generate(CodeStubAssembler* assembler,
+                                        compiler::Node* left,
+                                        compiler::Node* right,
+                                        compiler::Node* context) {
   using compiler::Node;
 
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
   Node* value = assembler->Word32Or(lhs_value, rhs_value);
   Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+  return result;
 }
 
-void BitwiseXorStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* BitwiseXorStub::Generate(CodeStubAssembler* assembler,
+                                         compiler::Node* left,
+                                         compiler::Node* right,
+                                         compiler::Node* context) {
   using compiler::Node;
 
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
   Node* value = assembler->Word32Xor(lhs_value, rhs_value);
   Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+  return result;
+}
+
+// static
+compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
+                                  compiler::Node* value,
+                                  compiler::Node* context) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  // Shared entry for floating point increment.
+  Label do_finc(assembler), end(assembler);
+  Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to try again due to ToNumber conversion.
+  Variable value_var(assembler, MachineRepresentation::kTagged);
+  Variable result_var(assembler, MachineRepresentation::kTagged);
+  Label start(assembler, &value_var);
+  value_var.Bind(value);
+  assembler->Goto(&start);
+  assembler->Bind(&start);
+  {
+    value = value_var.value();
+
+    Label if_issmi(assembler), if_isnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+
+    assembler->Bind(&if_issmi);
+    {
+      // Try fast Smi addition first.
+      Node* one = assembler->SmiConstant(Smi::FromInt(1));
+      Node* pair = assembler->SmiAddWithOverflow(value, one);
+      Node* overflow = assembler->Projection(1, pair);
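+      // {pair} carries the tagged sum as projection 0 and the overflow bit
+      // as projection 1.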
+
+      // Check if the Smi addition overflowed.
+      Label if_overflow(assembler), if_notoverflow(assembler);
+      assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+      assembler->Bind(&if_notoverflow);
+      result_var.Bind(assembler->Projection(0, pair));
+      assembler->Goto(&end);
+
+      assembler->Bind(&if_overflow);
+      {
+        var_finc_value.Bind(assembler->SmiToFloat64(value));
+        assembler->Goto(&do_finc);
+      }
+    }
+
+    assembler->Bind(&if_isnotsmi);
+    {
+      // Check if the value is a HeapNumber.
+      Label if_valueisnumber(assembler),
+          if_valuenotnumber(assembler, Label::kDeferred);
+      Node* value_map = assembler->LoadMap(value);
+      Node* number_map = assembler->HeapNumberMapConstant();
+      assembler->Branch(assembler->WordEqual(value_map, number_map),
+                        &if_valueisnumber, &if_valuenotnumber);
+
+      assembler->Bind(&if_valueisnumber);
+      {
+        // Load the HeapNumber value.
+        var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
+        assembler->Goto(&do_finc);
+      }
+
+      assembler->Bind(&if_valuenotnumber);
+      {
+        // Convert to a Number first and try again.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        value_var.Bind(assembler->CallStub(callable, context, value));
+        assembler->Goto(&start);
+      }
+    }
+  }
+
+  assembler->Bind(&do_finc);
+  {
+    Node* finc_value = var_finc_value.value();
+    Node* one = assembler->Float64Constant(1.0);
+    Node* finc_result = assembler->Float64Add(finc_value, one);
+    result_var.Bind(assembler->ChangeFloat64ToTagged(finc_result));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result_var.value();
+}
+
+// static
+compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
+                                  compiler::Node* value,
+                                  compiler::Node* context) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  // Shared entry for floating point decrement.
+  Label do_fdec(assembler), end(assembler);
+  Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to try again due to ToNumber conversion.
+  Variable value_var(assembler, MachineRepresentation::kTagged);
+  Variable result_var(assembler, MachineRepresentation::kTagged);
+  Label start(assembler, &value_var);
+  value_var.Bind(value);
+  assembler->Goto(&start);
+  assembler->Bind(&start);
+  {
+    value = value_var.value();
+
+    Label if_issmi(assembler), if_isnotsmi(assembler);
+    assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+
+    assembler->Bind(&if_issmi);
+    {
+      // Try fast Smi subtraction first.
+      Node* one = assembler->SmiConstant(Smi::FromInt(1));
+      Node* pair = assembler->SmiSubWithOverflow(value, one);
+      Node* overflow = assembler->Projection(1, pair);
+
+      // Check if the Smi subtraction overflowed.
+      Label if_overflow(assembler), if_notoverflow(assembler);
+      assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+      assembler->Bind(&if_notoverflow);
+      result_var.Bind(assembler->Projection(0, pair));
+      assembler->Goto(&end);
+
+      assembler->Bind(&if_overflow);
+      {
+        var_fdec_value.Bind(assembler->SmiToFloat64(value));
+        assembler->Goto(&do_fdec);
+      }
+    }
+
+    assembler->Bind(&if_isnotsmi);
+    {
+      // Check if the value is a HeapNumber.
+      Label if_valueisnumber(assembler),
+          if_valuenotnumber(assembler, Label::kDeferred);
+      Node* value_map = assembler->LoadMap(value);
+      Node* number_map = assembler->HeapNumberMapConstant();
+      assembler->Branch(assembler->WordEqual(value_map, number_map),
+                        &if_valueisnumber, &if_valuenotnumber);
+
+      assembler->Bind(&if_valueisnumber);
+      {
+        // Load the HeapNumber value.
+        var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
+        assembler->Goto(&do_fdec);
+      }
+
+      assembler->Bind(&if_valuenotnumber);
+      {
+        // Convert to a Number first and try again.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        value_var.Bind(assembler->CallStub(callable, context, value));
+        assembler->Goto(&start);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fdec);
+  {
+    Node* fdec_value = var_fdec_value.value();
+    Node* one = assembler->Float64Constant(1.0);
+    Node* fdec_result = assembler->Float64Sub(fdec_value, one);
+    result_var.Bind(assembler->ChangeFloat64ToTagged(fdec_result));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  return result_var.value();
+}
+
+void InstanceOfStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* object = assembler->Parameter(0);
+  Node* callable = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Label return_runtime(assembler, Label::kDeferred);
+
+  // Check that nobody has installed @@hasInstance anywhere.
+  assembler->GotoUnless(
+      assembler->WordEqual(
+          assembler->LoadObjectField(
+              assembler->LoadRoot(Heap::kHasInstanceProtectorRootIndex),
+              PropertyCell::kValueOffset),
+          assembler->SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
+      &return_runtime);
+
+  // Check if {callable} is a valid receiver.
+  assembler->GotoIf(assembler->WordIsSmi(callable), &return_runtime);
+  assembler->GotoIf(
+      assembler->Word32Equal(
+          assembler->Word32And(
+              assembler->LoadMapBitField(assembler->LoadMap(callable)),
+              assembler->Int32Constant(1 << Map::kIsCallable)),
+          assembler->Int32Constant(0)),
+      &return_runtime);
+
+  // Use the inline OrdinaryHasInstance directly.
+  assembler->Return(assembler->OrdinaryHasInstance(context, callable, object));
+
+  // TODO(bmeurer): Use GetPropertyStub here once available.
+  assembler->Bind(&return_runtime);
+  assembler->TailCallRuntime(Runtime::kInstanceOf, context, object, callable);
 }
 
 namespace {
@@ -1082,11 +1873,11 @@
   kGreaterThanOrEqual
 };
 
-void GenerateAbstractRelationalComparison(
-    compiler::CodeStubAssembler* assembler, RelationalComparisonMode mode) {
-  typedef compiler::CodeStubAssembler::Label Label;
+void GenerateAbstractRelationalComparison(CodeStubAssembler* assembler,
+                                          RelationalComparisonMode mode) {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* context = assembler->Parameter(2);
 
@@ -1425,17 +2216,16 @@
 
 enum ResultMode { kDontNegateResult, kNegateResult };
 
-void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
-                        compiler::Node* value,
-                        compiler::CodeStubAssembler::Label* if_equal,
-                        compiler::CodeStubAssembler::Label* if_notequal) {
+void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+                        CodeStubAssembler::Label* if_equal,
+                        CodeStubAssembler::Label* if_notequal) {
   // In case of abstract or strict equality checks, we need additional checks
   // for NaN values because they are not considered equal, even if both the
   // left and the right hand side reference exactly the same value.
   // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
   // seems to be what is tested in the current SIMD.js testsuite.
 
-  typedef compiler::CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
 
   // Check if {value} is a Smi or a HeapObject.
@@ -1472,11 +2262,10 @@
 }
 
 void GenerateEqual_Simd128Value_HeapObject(
-    compiler::CodeStubAssembler* assembler, compiler::Node* lhs,
-    compiler::Node* lhs_map, compiler::Node* rhs, compiler::Node* rhs_map,
-    compiler::CodeStubAssembler::Label* if_equal,
-    compiler::CodeStubAssembler::Label* if_notequal) {
-  typedef compiler::CodeStubAssembler::Label Label;
+    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
+    compiler::Node* rhs, compiler::Node* rhs_map,
+    CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
 
   // Check if {lhs} and {rhs} have the same map.
@@ -1551,14 +2340,14 @@
 }
 
 // ES6 section 7.2.12 Abstract Equality Comparison
-void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
+void GenerateEqual(CodeStubAssembler* assembler, ResultMode mode) {
   // This is a slightly optimized version of Object::Equals represented as
   // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
   // change something functionality-wise in here, remember to update the
   // Object::Equals method as well.
-  typedef compiler::CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* context = assembler->Parameter(2);
 
@@ -2079,8 +2868,7 @@
   assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
 }
 
-void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
-                         ResultMode mode) {
+void GenerateStrictEqual(CodeStubAssembler* assembler, ResultMode mode) {
   // Here's pseudo-code for the algorithm below in case of kDontNegateResult
   // mode; for kNegateResult mode we properly negate the result.
   //
@@ -2129,7 +2917,7 @@
   //   }
   // }
 
-  typedef compiler::CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
 
   Node* lhs = assembler->Parameter(0);
@@ -2336,11 +3124,11 @@
   assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
 }
 
-void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
+void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
                                         RelationalComparisonMode mode) {
-  typedef compiler::CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* lhs = assembler->Parameter(0);
   Node* rhs = assembler->Parameter(1);
@@ -2519,8 +3307,7 @@
   }
 }
 
-void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
-                         ResultMode mode) {
+void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
   // Here's pseudo-code for the algorithm below in case of kDontNegateResult
   // mode; for kNegateResult mode we properly negate the result.
   //
@@ -2537,9 +3324,9 @@
   // }
   // return %StringEqual(lhs, rhs);
 
-  typedef compiler::CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* lhs = assembler->Parameter(0);
   Node* rhs = assembler->Parameter(1);
@@ -2698,80 +3485,86 @@
 
 }  // namespace
 
-void LessThanStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* context = assembler->Parameter(3);
+  Node* receiver = assembler->Parameter(0);
+  // For now we only support receiver_is_holder.
+  DCHECK(receiver_is_holder());
+  Node* holder = receiver;
+  Node* map = assembler->LoadMap(receiver);
+  Node* descriptors = assembler->LoadMapDescriptors(map);
+  Node* offset =
+      assembler->Int32Constant(DescriptorArray::ToValueIndex(index()));
+  Node* callback = assembler->LoadFixedArrayElement(descriptors, offset);
+  assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
+                          holder, callback);
+}
+
+void LessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateAbstractRelationalComparison(assembler, kLessThan);
 }
 
-void LessThanOrEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void LessThanOrEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual);
 }
 
-void GreaterThanStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void GreaterThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateAbstractRelationalComparison(assembler, kGreaterThan);
 }
 
 void GreaterThanOrEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+    CodeStubAssembler* assembler) const {
   GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual);
 }
 
-void EqualStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
+void EqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateEqual(assembler, kDontNegateResult);
 }
 
-void NotEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void NotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateEqual(assembler, kNegateResult);
 }
 
-void StrictEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void StrictEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateStrictEqual(assembler, kDontNegateResult);
 }
 
-void StrictNotEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void StrictNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateStrictEqual(assembler, kNegateResult);
 }
 
-void StringEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void StringEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateStringEqual(assembler, kDontNegateResult);
 }
 
-void StringNotEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void StringNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateStringEqual(assembler, kNegateResult);
 }
 
-void StringLessThanStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void StringLessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   GenerateStringRelationalComparison(assembler, kLessThan);
 }
 
 void StringLessThanOrEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+    CodeStubAssembler* assembler) const {
   GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
 }
 
 void StringGreaterThanStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+    CodeStubAssembler* assembler) const {
   GenerateStringRelationalComparison(assembler, kGreaterThan);
 }
 
 void StringGreaterThanOrEqualStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+    CodeStubAssembler* assembler) const {
   GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
 }
 
-void ToLengthStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
-  typedef compiler::CodeStubAssembler::Label Label;
+void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* context = assembler->Parameter(1);
 
@@ -2844,13 +3637,13 @@
   }
 }
 
-void ToBooleanStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+void ToBooleanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Label Label;
 
   Node* value = assembler->Parameter(0);
-  Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+  Label if_valueissmi(assembler), if_valueisnotsmi(assembler),
+      return_true(assembler), return_false(assembler);
 
   // Check if {value} is a Smi or a HeapObject.
   assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
@@ -2859,21 +3652,15 @@
   assembler->Bind(&if_valueissmi);
   {
     // The {value} is a Smi, only need to check against zero.
-    Label if_valueiszero(assembler), if_valueisnotzero(assembler);
     assembler->Branch(assembler->SmiEqual(value, assembler->SmiConstant(0)),
-                      &if_valueiszero, &if_valueisnotzero);
-
-    assembler->Bind(&if_valueiszero);
-    assembler->Return(assembler->BooleanConstant(false));
-
-    assembler->Bind(&if_valueisnotzero);
-    assembler->Return(assembler->BooleanConstant(true));
+                      &return_false, &return_true);
   }
 
   assembler->Bind(&if_valueisnotsmi);
   {
-    Label if_valueisstring(assembler), if_valueisheapnumber(assembler),
-        if_valueisoddball(assembler), if_valueisother(assembler);
+    Label if_valueisstring(assembler), if_valueisnotstring(assembler),
+        if_valueisheapnumber(assembler), if_valueisoddball(assembler),
+        if_valueisother(assembler);
 
     // The {value} is a HeapObject, load its map.
     Node* value_map = assembler->LoadMap(value);
@@ -2885,24 +3672,20 @@
 
     // Dispatch based on the instance type; we distinguish all String instance
     // types, the HeapNumber type and the Oddball type.
-    size_t const kNumCases = FIRST_NONSTRING_TYPE + 2;
+    assembler->Branch(assembler->Int32LessThan(
+                          value_instance_type,
+                          assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+                      &if_valueisstring, &if_valueisnotstring);
+    assembler->Bind(&if_valueisnotstring);
+    size_t const kNumCases = 2;
     Label* case_labels[kNumCases];
     int32_t case_values[kNumCases];
-    for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
-      case_labels[i] = new Label(assembler);
-      case_values[i] = i;
-    }
-    case_labels[FIRST_NONSTRING_TYPE + 0] = &if_valueisheapnumber;
-    case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
-    case_labels[FIRST_NONSTRING_TYPE + 1] = &if_valueisoddball;
-    case_values[FIRST_NONSTRING_TYPE + 1] = ODDBALL_TYPE;
+    case_labels[0] = &if_valueisheapnumber;
+    case_values[0] = HEAP_NUMBER_TYPE;
+    case_labels[1] = &if_valueisoddball;
+    case_values[1] = ODDBALL_TYPE;
     assembler->Switch(value_instance_type, &if_valueisother, case_values,
                       case_labels, arraysize(case_values));
-    for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
-      assembler->Bind(case_labels[i]);
-      assembler->Goto(&if_valueisstring);
-      delete case_labels[i];
-    }
 
     assembler->Bind(&if_valueisstring);
     {
@@ -2911,16 +3694,9 @@
           assembler->LoadObjectField(value, String::kLengthOffset);
 
       // Check if the {value} is the empty string.
-      Label if_valueisempty(assembler), if_valueisnotempty(assembler);
       assembler->Branch(
           assembler->SmiEqual(value_length, assembler->SmiConstant(0)),
-          &if_valueisempty, &if_valueisnotempty);
-
-      assembler->Bind(&if_valueisempty);
-      assembler->Return(assembler->BooleanConstant(false));
-
-      assembler->Bind(&if_valueisnotempty);
-      assembler->Return(assembler->BooleanConstant(true));
+          &return_false, &return_true);
     }
 
     assembler->Bind(&if_valueisheapnumber);
@@ -2929,25 +3705,15 @@
           MachineType::Float64(), value,
           assembler->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
 
-      Label if_valueispositive(assembler), if_valueisnotpositive(assembler),
-          if_valueisnegative(assembler), if_valueisnanorzero(assembler);
+      Label if_valueisnotpositive(assembler);
       assembler->Branch(assembler->Float64LessThan(
                             assembler->Float64Constant(0.0), value_value),
-                        &if_valueispositive, &if_valueisnotpositive);
-
-      assembler->Bind(&if_valueispositive);
-      assembler->Return(assembler->BooleanConstant(true));
+                        &return_true, &if_valueisnotpositive);
 
       assembler->Bind(&if_valueisnotpositive);
       assembler->Branch(assembler->Float64LessThan(
                             value_value, assembler->Float64Constant(0.0)),
-                        &if_valueisnegative, &if_valueisnanorzero);
-
-      assembler->Bind(&if_valueisnegative);
-      assembler->Return(assembler->BooleanConstant(true));
-
-      assembler->Bind(&if_valueisnanorzero);
-      assembler->Return(assembler->BooleanConstant(false));
+                        &return_true, &return_false);
     }
 
     assembler->Bind(&if_valueisoddball);
@@ -2968,26 +3734,22 @@
           assembler->Int32Constant(1 << Map::kIsUndetectable));
 
       // Check if the {value} is undetectable.
-      Label if_valueisundetectable(assembler),
-          if_valueisnotundetectable(assembler);
       assembler->Branch(assembler->Word32Equal(value_map_undetectable,
                                                assembler->Int32Constant(0)),
-                        &if_valueisnotundetectable, &if_valueisundetectable);
-
-      assembler->Bind(&if_valueisundetectable);
-      assembler->Return(assembler->BooleanConstant(false));
-
-      assembler->Bind(&if_valueisnotundetectable);
-      assembler->Return(assembler->BooleanConstant(true));
+                        &return_true, &return_false);
     }
   }
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
 }
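
All paths above now funnel into the shared return_true/return_false labels;
the underlying truthiness rules reduce to the following standalone helpers
(hypothetical; the stub additionally handles Oddballs and undetectable maps,
elided here):

#include <string>

bool NumberToBooleanSketch(double value) {
  // Exactly the two Float64LessThan branches above: truthy iff strictly
  // positive or strictly negative, so NaN, +0.0 and -0.0 are all falsy.
  return value > 0.0 || value < 0.0;
}

bool StringToBooleanSketch(const std::string& value) {
  // Only the empty string is falsy.
  return !value.empty();
}
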
 
-void ToIntegerStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
-  typedef compiler::CodeStubAssembler::Label Label;
+void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Variable Variable;
 
   Node* context = assembler->Parameter(1);
 
@@ -3046,7 +3808,7 @@
 }
 
 void StoreInterceptorStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+    CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
   Node* receiver = assembler->Parameter(0);
   Node* name = assembler->Parameter(1);
@@ -3057,9 +3819,9 @@
 }
 
 void LoadIndexedInterceptorStub::GenerateAssembly(
-    compiler::CodeStubAssembler* assembler) const {
+    CodeStubAssembler* assembler) const {
   typedef compiler::Node Node;
-  typedef compiler::CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Label Label;
   Node* receiver = assembler->Parameter(0);
   Node* key = assembler->Parameter(1);
   Node* slot = assembler->Parameter(2);
@@ -3078,6 +3840,133 @@
                              slot, vector);
 }
 
+// static
+bool FastCloneShallowObjectStub::IsSupported(ObjectLiteral* expr) {
+  // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
+  // support copy-on-write (COW) elements for now.
+  // TODO(mvstanton): make object literals support COW elements.
+  return expr->fast_elements() && expr->has_shallow_properties() &&
+         expr->properties_count() <= kMaximumClonedProperties;
+}
+
+// static
+int FastCloneShallowObjectStub::PropertiesCount(int literal_length) {
+  // This heuristic of setting empty literals to have
+  // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
+  // runtime.
+  // TODO(verwaest): Unify this with the heuristic in the runtime.
+  return literal_length == 0
+             ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
+             : literal_length;
+}
+
+// static
+compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
+    CodeStubAssembler* assembler, compiler::CodeAssembler::Label* call_runtime,
+    compiler::Node* closure, compiler::Node* literals_index,
+    compiler::Node* properties_count) {
+  typedef compiler::Node Node;
+  typedef compiler::CodeAssembler::Label Label;
+  typedef compiler::CodeAssembler::Variable Variable;
+
+  Node* undefined = assembler->UndefinedConstant();
+  Node* literals_array =
+      assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* allocation_site = assembler->LoadFixedArrayElement(
+      literals_array, literals_index,
+      LiteralsArray::kFirstLiteralIndex * kPointerSize,
+      CodeStubAssembler::SMI_PARAMETERS);
+  assembler->GotoIf(assembler->WordEqual(allocation_site, undefined),
+                    call_runtime);
+
+  // Calculate the object and allocation size based on the properties count.
+  Node* object_size = assembler->IntPtrAdd(
+      assembler->WordShl(properties_count, kPointerSizeLog2),
+      assembler->IntPtrConstant(JSObject::kHeaderSize));
+  Node* allocation_size = object_size;
+  if (FLAG_allocation_site_pretenuring) {
+    allocation_size = assembler->IntPtrAdd(
+        object_size, assembler->IntPtrConstant(AllocationMemento::kSize));
+  }
+  Node* boilerplate = assembler->LoadObjectField(
+      allocation_site, AllocationSite::kTransitionInfoOffset);
+  Node* boilerplate_map = assembler->LoadMap(boilerplate);
+  Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
+  Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
+  assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
+                        call_runtime);
+
+  Node* copy = assembler->Allocate(allocation_size);
+
+  // Copy boilerplate elements.
+  Variable offset(assembler, MachineType::PointerRepresentation());
+  offset.Bind(assembler->IntPtrConstant(-kHeapObjectTag));
+  Node* end_offset = assembler->IntPtrAdd(object_size, offset.value());
+  Label loop_body(assembler, &offset), loop_check(assembler, &offset);
+  // The object size is always nonzero, so enter the loop body directly.
+  assembler->Goto(&loop_body);
+  assembler->Bind(&loop_body);
+  {
+    // The Allocate above guarantees that the copy lies in new space. This
+    // allows us to skip write barriers. This is necessary since we may also be
+    // copying unboxed doubles.
+    Node* field =
+        assembler->Load(MachineType::IntPtr(), boilerplate, offset.value());
+    assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
+                                   offset.value(), field);
+    assembler->Goto(&loop_check);
+  }
+  assembler->Bind(&loop_check);
+  {
+    offset.Bind(assembler->IntPtrAdd(offset.value(),
+                                     assembler->IntPtrConstant(kPointerSize)));
+    assembler->GotoUnless(
+        assembler->IntPtrGreaterThanOrEqual(offset.value(), end_offset),
+        &loop_body);
+  }
+
+  if (FLAG_allocation_site_pretenuring) {
+    Node* memento = assembler->InnerAllocate(copy, object_size);
+    assembler->StoreObjectFieldNoWriteBarrier(
+        memento, HeapObject::kMapOffset,
+        assembler->LoadRoot(Heap::kAllocationMementoMapRootIndex));
+    assembler->StoreObjectFieldNoWriteBarrier(
+        memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
+    Node* memento_create_count = assembler->LoadObjectField(
+        allocation_site, AllocationSite::kPretenureCreateCountOffset);
+    memento_create_count = assembler->SmiAdd(
+        memento_create_count, assembler->SmiConstant(Smi::FromInt(1)));
+    assembler->StoreObjectFieldNoWriteBarrier(
+        allocation_site, AllocationSite::kPretenureCreateCountOffset,
+        memento_create_count);
+  }
+
+  // TODO(verwaest): Allocate and fill in double boxes.
+  return copy;
+}
+
+void FastCloneShallowObjectStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  Label call_runtime(assembler);
+  Node* closure = assembler->Parameter(0);
+  Node* literals_index = assembler->Parameter(1);
+
+  Node* properties_count =
+      assembler->IntPtrConstant(PropertiesCount(this->length()));
+  Node* copy = GenerateFastPath(assembler, &call_runtime, closure,
+                                literals_index, properties_count);
+  assembler->Return(copy);
+
+  assembler->Bind(&call_runtime);
+  Node* constant_properties = assembler->Parameter(2);
+  Node* flags = assembler->Parameter(3);
+  Node* context = assembler->Parameter(4);
+  assembler->TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                             literals_index, constant_properties, flags);
+}
+
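
As a cross-check of the size arithmetic in GenerateFastPath, the same
computation over plain size_t (the constants are illustrative stand-ins for
the JSObject and AllocationMemento layout on a 64-bit target):

#include <cstddef>

constexpr std::size_t kPointerSizeSketch = 8;
constexpr std::size_t kJSObjectHeaderSketch = 3 * kPointerSizeSketch;
constexpr std::size_t kMementoSizeSketch = 2 * kPointerSizeSketch;

std::size_t CloneAllocationSize(std::size_t properties_count,
                                bool pretenuring) {
  // In-object properties follow the fixed object header.
  std::size_t object_size =
      kJSObjectHeaderSketch + properties_count * kPointerSizeSketch;
  // With allocation-site pretenuring an AllocationMemento is appended
  // directly after the object, as in the assembler code above.
  return pretenuring ? object_size + kMementoSizeSketch : object_size;
}
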
 template<class StateType>
 void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
   // Note: Although a no-op transition is semantically OK, it is hinting at a
@@ -3191,11 +4080,7 @@
   return VectorStoreTransitionDescriptor(isolate());
 }
 
-
-void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(Runtime::FunctionForId(Runtime::kNewClosure)->entry);
-}
-
+void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {}
 
 void FastNewContextStub::InitializeDescriptor(CodeStubDescriptor* d) {}
 
@@ -3224,14 +4109,6 @@
 }
 
 
-void FastCloneShallowObjectStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  FastCloneShallowObjectDescriptor call_descriptor(isolate());
-  descriptor->Initialize(
-      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
 void CreateAllocationSiteStub::InitializeDescriptor(CodeStubDescriptor* d) {}
 
 
@@ -3259,11 +4136,6 @@
 }
 
 
-void AllocateMutableHeapNumberStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  descriptor->Initialize();
-}
-
 #define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
   void Allocate##Type##Stub::InitializeDescriptor(                 \
       CodeStubDescriptor* descriptor) {                            \
@@ -3273,11 +4145,6 @@
 SIMD128_TYPES(SIMD128_INIT_DESC)
 #undef SIMD128_INIT_DESC
 
-void AllocateInNewSpaceStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  descriptor->Initialize();
-}
-
 void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
   descriptor->SetMissHandler(ExternalReference(
@@ -3316,6 +4183,123 @@
   stub.GetCode();
 }
 
+void HasPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* key = assembler->Parameter(0);
+  Node* object = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Label call_runtime(assembler), return_true(assembler),
+      return_false(assembler);
+
+  // Ensure the object is a JSReceiver; otherwise call the runtime to throw.
+  Label if_objectisnotsmi(assembler);
+  assembler->Branch(assembler->WordIsSmi(object), &call_runtime,
+                    &if_objectisnotsmi);
+  assembler->Bind(&if_objectisnotsmi);
+
+  Node* map = assembler->LoadMap(object);
+  Node* instance_type = assembler->LoadMapInstanceType(map);
+  {
+    Label if_objectisreceiver(assembler);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    assembler->Branch(
+        assembler->Int32GreaterThanOrEqual(
+            instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+        &if_objectisreceiver, &call_runtime);
+    assembler->Bind(&if_objectisreceiver);
+  }
+
+  Variable var_index(assembler, MachineRepresentation::kWord32);
+
+  Label keyisindex(assembler), if_iskeyunique(assembler);
+  assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
+                       &call_runtime);
+
+  assembler->Bind(&if_iskeyunique);
+  {
+    Variable var_object(assembler, MachineRepresentation::kTagged);
+    Variable var_map(assembler, MachineRepresentation::kTagged);
+    Variable var_instance_type(assembler, MachineRepresentation::kWord8);
+
+    Variable* merged_variables[] = {&var_object, &var_map, &var_instance_type};
+    Label loop(assembler, arraysize(merged_variables), merged_variables);
+    var_object.Bind(object);
+    var_map.Bind(map);
+    var_instance_type.Bind(instance_type);
+    assembler->Goto(&loop);
+    assembler->Bind(&loop);
+    {
+      Label next_proto(assembler);
+      assembler->TryLookupProperty(var_object.value(), var_map.value(),
+                                   var_instance_type.value(), key, &return_true,
+                                   &next_proto, &call_runtime);
+      assembler->Bind(&next_proto);
+
+      Node* proto = assembler->LoadMapPrototype(var_map.value());
+
+      Label if_not_null(assembler);
+      assembler->Branch(assembler->WordEqual(proto, assembler->NullConstant()),
+                        &return_false, &if_not_null);
+      assembler->Bind(&if_not_null);
+
+      Node* map = assembler->LoadMap(proto);
+      Node* instance_type = assembler->LoadMapInstanceType(map);
+
+      var_object.Bind(proto);
+      var_map.Bind(map);
+      var_instance_type.Bind(instance_type);
+      assembler->Goto(&loop);
+    }
+  }
+  assembler->Bind(&keyisindex);
+  {
+    Variable var_object(assembler, MachineRepresentation::kTagged);
+    Variable var_map(assembler, MachineRepresentation::kTagged);
+    Variable var_instance_type(assembler, MachineRepresentation::kWord8);
+
+    Variable* merged_variables[] = {&var_object, &var_map, &var_instance_type};
+    Label loop(assembler, arraysize(merged_variables), merged_variables);
+    var_object.Bind(object);
+    var_map.Bind(map);
+    var_instance_type.Bind(instance_type);
+    assembler->Goto(&loop);
+    assembler->Bind(&loop);
+    {
+      Label next_proto(assembler);
+      assembler->TryLookupElement(var_object.value(), var_map.value(),
+                                  var_instance_type.value(), var_index.value(),
+                                  &return_true, &next_proto, &call_runtime);
+      assembler->Bind(&next_proto);
+
+      Node* proto = assembler->LoadMapPrototype(var_map.value());
+
+      Label if_not_null(assembler);
+      assembler->Branch(assembler->WordEqual(proto, assembler->NullConstant()),
+                        &return_false, &if_not_null);
+      assembler->Bind(&if_not_null);
+
+      Node* map = assembler->LoadMap(proto);
+      Node* instance_type = assembler->LoadMapInstanceType(map);
+
+      var_object.Bind(proto);
+      var_map.Bind(map);
+      var_instance_type.Bind(instance_type);
+      assembler->Goto(&loop);
+    }
+  }
+  assembler->Bind(&return_true);
+  assembler->Return(assembler->BooleanConstant(true));
+
+  assembler->Bind(&return_false);
+  assembler->Return(assembler->BooleanConstant(false));
+
+  assembler->Bind(&call_runtime);
+  assembler->TailCallRuntime(Runtime::kHasProperty, context, key, object);
+}
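
Both lookup loops above share one shape: walk the prototype chain, answering
true on the first own hit and false once the chain ends in null. A toy
restatement (hypothetical object model; the real stub also distinguishes
named from indexed lookups and bails out to the runtime for special
receivers):

#include <set>
#include <string>

struct ObjectSketch {
  std::set<std::string> own_properties;
  const ObjectSketch* prototype = nullptr;  // nullptr stands in for null
};

bool HasPropertySketch(const ObjectSketch* object, const std::string& key) {
  // Walk the chain until the key is found or the chain ends in null.
  for (const ObjectSketch* o = object; o != nullptr; o = o->prototype) {
    if (o->own_properties.count(key) != 0) return true;
  }
  return false;
}
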
 
 void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
   CreateAllocationSiteStub stub(isolate);
@@ -3472,6 +4456,43 @@
   entry_hook(function, stack_pointer);
 }
 
+void ArrayNoArgumentConstructorStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* native_context = assembler->LoadObjectField(
+      assembler->Parameter(
+          ArrayNoArgumentConstructorDescriptor::kFunctionIndex),
+      JSFunction::kContextOffset);
+  bool track_allocation_site =
+      AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
+      override_mode() != DISABLE_ALLOCATION_SITES;
+  Node* allocation_site =
+      track_allocation_site
+          ? assembler->Parameter(
+                ArrayNoArgumentConstructorDescriptor::kAllocationSiteIndex)
+          : nullptr;
+  Node* array_map =
+      assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+  Node* array = assembler->AllocateJSArray(
+      elements_kind(), array_map,
+      assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+      assembler->IntPtrConstant(0), allocation_site);
+  assembler->Return(array);
+}
+
+void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
+    CodeStubAssembler* assembler) const {
+  typedef compiler::Node Node;
+  Node* array_map = assembler->LoadObjectField(
+      assembler->Parameter(
+          ArrayNoArgumentConstructorDescriptor::kFunctionIndex),
+      JSFunction::kPrototypeOrInitialMapOffset);
+  Node* array = assembler->AllocateJSArray(
+      elements_kind(), array_map,
+      assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+      assembler->IntPtrConstant(0), nullptr);
+  assembler->Return(array);
+}
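
Both constructor stubs above end in the same AllocateJSArray call; the only
decision is whether an allocation site is threaded through, which reduces to
this predicate (local re-declarations of the enums, for illustration only):

enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };
enum AllocationSiteOverrideMode { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };

bool ShouldPassAllocationSite(AllocationSiteMode mode,
                              AllocationSiteOverrideMode override_mode) {
  // The internal variant always uses DONT_OVERRIDE and a non-tracking
  // elements kind, so it simply hands nullptr to AllocateJSArray.
  return mode == TRACK_ALLOCATION_SITE &&
         override_mode != DISABLE_ALLOCATION_SITES;
}
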
 
 ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
     : PlatformCodeStub(isolate) {
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ace4aae..7bccaa9 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -7,8 +7,8 @@
 
 #include "src/allocation.h"
 #include "src/assembler.h"
+#include "src/code-stub-assembler.h"
 #include "src/codegen.h"
-#include "src/compiler/code-stub-assembler.h"
 #include "src/globals.h"
 #include "src/ic/ic-state.h"
 #include "src/interface-descriptors.h"
@@ -31,7 +31,6 @@
   V(CompareIC)                              \
   V(DoubleToI)                              \
   V(FunctionPrototype)                      \
-  V(InstanceOf)                             \
   V(InternalArrayConstructor)               \
   V(JSEntry)                                \
   V(KeyedLoadICTrampoline)                  \
@@ -57,9 +56,7 @@
   V(VectorStoreIC)                          \
   V(VectorKeyedStoreIC)                     \
   /* HydrogenCodeStubs */                   \
-  V(AllocateInNewSpace)                     \
   V(ArrayNArgumentsConstructor)             \
-  V(ArrayNoArgumentConstructor)             \
   V(ArraySingleArgumentConstructor)         \
   V(BinaryOpIC)                             \
   V(BinaryOpWithAllocationSite)             \
@@ -69,7 +66,6 @@
   V(FastArrayPush)                          \
   V(FastCloneRegExp)                        \
   V(FastCloneShallowArray)                  \
-  V(FastCloneShallowObject)                 \
   V(FastNewClosure)                         \
   V(FastNewContext)                         \
   V(FastNewObject)                          \
@@ -78,7 +74,6 @@
   V(FastNewStrictArguments)                 \
   V(GrowArrayElements)                      \
   V(InternalArrayNArgumentsConstructor)     \
-  V(InternalArrayNoArgumentConstructor)     \
   V(InternalArraySingleArgumentConstructor) \
   V(KeyedLoadGeneric)                       \
   V(LoadGlobalViaContext)                   \
@@ -98,7 +93,6 @@
   V(LoadIC)                                 \
   /* TurboFanCodeStubs */                   \
   V(AllocateHeapNumber)                     \
-  V(AllocateMutableHeapNumber)              \
   V(AllocateFloat32x4)                      \
   V(AllocateInt32x4)                        \
   V(AllocateUint32x4)                       \
@@ -109,12 +103,24 @@
   V(AllocateInt8x16)                        \
   V(AllocateUint8x16)                       \
   V(AllocateBool8x16)                       \
+  V(ArrayNoArgumentConstructor)             \
   V(StringLength)                           \
   V(Add)                                    \
   V(Subtract)                               \
+  V(Multiply)                               \
+  V(Divide)                                 \
+  V(Modulus)                                \
+  V(ShiftRight)                             \
+  V(ShiftRightLogical)                      \
+  V(ShiftLeft)                              \
   V(BitwiseAnd)                             \
   V(BitwiseOr)                              \
   V(BitwiseXor)                             \
+  V(Inc)                                    \
+  V(InternalArrayNoArgumentConstructor)     \
+  V(Dec)                                    \
+  V(FastCloneShallowObject)                 \
+  V(InstanceOf)                             \
   V(LessThan)                               \
   V(LessThanOrEqual)                        \
   V(GreaterThan)                            \
@@ -132,17 +138,19 @@
   V(ToBoolean)                              \
   V(ToInteger)                              \
   V(ToLength)                               \
+  V(HasProperty)                            \
   /* IC Handler stubs */                    \
   V(ArrayBufferViewLoadField)               \
+  V(KeyedLoadSloppyArguments)               \
+  V(KeyedStoreSloppyArguments)              \
+  V(LoadApiGetter)                          \
   V(LoadConstant)                           \
   V(LoadFastElement)                        \
   V(LoadField)                              \
   V(LoadIndexedInterceptor)                 \
-  V(KeyedLoadSloppyArguments)               \
-  V(KeyedStoreSloppyArguments)              \
   V(StoreField)                             \
-  V(StoreInterceptor)                       \
   V(StoreGlobal)                            \
+  V(StoreInterceptor)                       \
   V(StoreTransition)
 
 // List of code stubs only used on ARM 32 bits platforms.
@@ -284,7 +292,6 @@
 
   virtual InlineCacheState GetICState() const { return UNINITIALIZED; }
   virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
-  virtual Code::StubType GetStubType() const { return Code::NORMAL; }
 
   Code::Flags GetCodeFlags() const;
 
@@ -390,10 +397,32 @@
   Handle<Code> GenerateCode() override;                               \
   DEFINE_CODE_STUB(NAME, SUPER)
 
-#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER)                  \
- public:                                                        \
-  void GenerateAssembly(compiler::CodeStubAssembler* assembler) \
-      const override;                                           \
+#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER)                        \
+ public:                                                              \
+  void GenerateAssembly(CodeStubAssembler* assembler) const override; \
+  DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NAME, SUPER)                       \
+ public:                                                                       \
+  static compiler::Node* Generate(CodeStubAssembler* assembler,                \
+                                  compiler::Node* left, compiler::Node* right, \
+                                  compiler::Node* context);                    \
+  void GenerateAssembly(CodeStubAssembler* assembler) const override {         \
+    assembler->Return(Generate(assembler, assembler->Parameter(0),             \
+                               assembler->Parameter(1),                        \
+                               assembler->Parameter(2)));                      \
+  }                                                                            \
+  DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(NAME, SUPER)                \
+ public:                                                               \
+  static compiler::Node* Generate(CodeStubAssembler* assembler,        \
+                                  compiler::Node* value,               \
+                                  compiler::Node* context);            \
+  void GenerateAssembly(CodeStubAssembler* assembler) const override { \
+    assembler->Return(Generate(assembler, assembler->Parameter(0),     \
+                               assembler->Parameter(1)));              \
+  }                                                                    \
   DEFINE_CODE_STUB(NAME, SUPER)
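
The two macros above exist so every binary and unary op stub gets an
identical GenerateAssembly shim around its static Generate. A toy,
compilable illustration of the pattern (plain ints instead of
CodeStubAssembler nodes; NAME kept only for symmetry with the original):

#include <iostream>

#define DEFINE_TOY_BINARY_OP(NAME)                  \
 public:                                            \
  static int Generate(int left, int right);         \
  int GenerateAssembly(int left, int right) const { \
    return Generate(left, right);                   \
  }

struct ToyAddStub {
  DEFINE_TOY_BINARY_OP(Add)
};

// Each stub then only has to supply the actual operation.
int ToyAddStub::Generate(int left, int right) { return left + right; }

int main() {
  std::cout << ToyAddStub().GenerateAssembly(2, 3) << "\n";  // prints 5
}
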
 
 #define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@@ -579,13 +608,10 @@
     return GetCallInterfaceDescriptor().GetStackParameterCount();
   }
 
-  Code::StubType GetStubType() const override { return Code::FAST; }
-
  protected:
   explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
 
-  virtual void GenerateAssembly(
-      compiler::CodeStubAssembler* assembler) const = 0;
+  virtual void GenerateAssembly(CodeStubAssembler* assembler) const = 0;
 
  private:
   DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
@@ -678,7 +704,7 @@
   explicit AddStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_CODE_STUB(Add, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Add, TurboFanCodeStub);
 };
 
 class SubtractStub final : public TurboFanCodeStub {
@@ -686,7 +712,56 @@
   explicit SubtractStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_CODE_STUB(Subtract, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Subtract, TurboFanCodeStub);
+};
+
+class MultiplyStub final : public TurboFanCodeStub {
+ public:
+  explicit MultiplyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Multiply, TurboFanCodeStub);
+};
+
+class DivideStub final : public TurboFanCodeStub {
+ public:
+  explicit DivideStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Divide, TurboFanCodeStub);
+};
+
+class ModulusStub final : public TurboFanCodeStub {
+ public:
+  explicit ModulusStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Modulus, TurboFanCodeStub);
+};
+
+class ShiftRightStub final : public TurboFanCodeStub {
+ public:
+  explicit ShiftRightStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRight, TurboFanCodeStub);
+};
+
+class ShiftRightLogicalStub final : public TurboFanCodeStub {
+ public:
+  explicit ShiftRightLogicalStub(Isolate* isolate)
+      : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRightLogical, TurboFanCodeStub);
+};
+
+class ShiftLeftStub final : public TurboFanCodeStub {
+ public:
+  explicit ShiftLeftStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftLeft, TurboFanCodeStub);
 };
 
 class BitwiseAndStub final : public TurboFanCodeStub {
@@ -694,7 +769,7 @@
   explicit BitwiseAndStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
 };
 
 class BitwiseOrStub final : public TurboFanCodeStub {
@@ -702,7 +777,7 @@
   explicit BitwiseOrStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_CODE_STUB(BitwiseOr, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseOr, TurboFanCodeStub);
 };
 
 class BitwiseXorStub final : public TurboFanCodeStub {
@@ -710,7 +785,32 @@
   explicit BitwiseXorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
-  DEFINE_TURBOFAN_CODE_STUB(BitwiseXor, TurboFanCodeStub);
+  DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseXor, TurboFanCodeStub);
+};
+
+class IncStub final : public TurboFanCodeStub {
+ public:
+  explicit IncStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
+  DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(Inc, TurboFanCodeStub);
+};
+
+class DecStub final : public TurboFanCodeStub {
+ public:
+  explicit DecStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
+  DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(Dec, TurboFanCodeStub);
+};
+
+class InstanceOfStub final : public TurboFanCodeStub {
+ public:
+  explicit InstanceOfStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ private:
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+  DEFINE_TURBOFAN_CODE_STUB(InstanceOf, TurboFanCodeStub);
 };
 
 class LessThanStub final : public TurboFanCodeStub {
@@ -857,9 +957,11 @@
  public:
   explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
-  void GenerateAssembly(compiler::CodeStubAssembler* assember) const override;
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
+  InlineCacheState GetICState() const override { return MONOMORPHIC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
   DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
@@ -871,11 +973,22 @@
       : TurboFanCodeStub(isolate) {}
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
+  InlineCacheState GetICState() const override { return MONOMORPHIC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
   DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
 };
 
+// ES6 section 12.10.3 "in" operator evaluation.
+class HasPropertyStub : public TurboFanCodeStub {
+ public:
+  explicit HasPropertyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(HasProperty);
+  DEFINE_TURBOFAN_CODE_STUB(HasProperty, TurboFanCodeStub);
+};
+
 enum StringAddFlags {
   // Omit both parameter checks.
   STRING_ADD_CHECK_NONE = 0,
@@ -942,7 +1055,7 @@
  private:
   STATIC_ASSERT(LANGUAGE_END == 3);
   class LanguageModeBits : public BitField<LanguageMode, 0, 2> {};
-  class FunctionKindBits : public BitField<FunctionKind, 2, 8> {};
+  class FunctionKindBits : public BitField<FunctionKind, 2, 9> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
   DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
@@ -986,11 +1099,19 @@
 // of the strict arguments object materialization code.
 class FastNewRestParameterStub final : public PlatformCodeStub {
  public:
-  explicit FastNewRestParameterStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
+  explicit FastNewRestParameterStub(Isolate* isolate,
+                                    bool skip_stub_frame = false)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
+  }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewRestParameter);
   DEFINE_PLATFORM_CODE_STUB(FastNewRestParameter, PlatformCodeStub);
+
+  int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
+
+ private:
+  class SkipStubFrameBits : public BitField<bool, 0, 1> {};
 };
 
 
@@ -999,11 +1120,19 @@
 // and easy as the current handwritten version.
 class FastNewSloppyArgumentsStub final : public PlatformCodeStub {
  public:
-  explicit FastNewSloppyArgumentsStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
+  explicit FastNewSloppyArgumentsStub(Isolate* isolate,
+                                      bool skip_stub_frame = false)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
+  }
+
+  int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewSloppyArguments);
   DEFINE_PLATFORM_CODE_STUB(FastNewSloppyArguments, PlatformCodeStub);
+
+ private:
+  class SkipStubFrameBits : public BitField<bool, 0, 1> {};
 };
 
 
@@ -1012,11 +1141,19 @@
 // and easy as the current handwritten version.
 class FastNewStrictArgumentsStub final : public PlatformCodeStub {
  public:
-  explicit FastNewStrictArgumentsStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
+  explicit FastNewStrictArgumentsStub(Isolate* isolate,
+                                      bool skip_stub_frame = false)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
+  }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewStrictArguments);
   DEFINE_PLATFORM_CODE_STUB(FastNewStrictArguments, PlatformCodeStub);
+
+  int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
+
+ private:
+  class SkipStubFrameBits : public BitField<bool, 0, 1> {};
 };
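
The three arguments stubs above all pack their flag with the same BitField
idiom. A standalone sketch of what encode/decode amount to (illustrative;
V8's real BitField template adds static checks on top):

#include <cstdint>

// Each field owns the bit range [shift, shift + size) of a 32-bit key, so
// several fields can be packed into one minor_key_.
template <class T, int shift, int size>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
};

using SkipStubFrameBitsSketch = BitFieldSketch<bool, 0, 1>;
static_assert(SkipStubFrameBitsSketch::decode(
                  SkipStubFrameBitsSketch::encode(true)),
              "encode/decode must round-trip");
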
 
 
@@ -1049,26 +1186,33 @@
   DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowArray, HydrogenCodeStub);
 };
 
-
-class FastCloneShallowObjectStub : public HydrogenCodeStub {
+class FastCloneShallowObjectStub : public TurboFanCodeStub {
  public:
   // Maximum number of properties in copied object.
   static const int kMaximumClonedProperties = 6;
 
   FastCloneShallowObjectStub(Isolate* isolate, int length)
-      : HydrogenCodeStub(isolate) {
+      : TurboFanCodeStub(isolate) {
     DCHECK_GE(length, 0);
     DCHECK_LE(length, kMaximumClonedProperties);
-    set_sub_minor_key(LengthBits::encode(length));
+    minor_key_ = LengthBits::encode(length);
   }
 
-  int length() const { return LengthBits::decode(sub_minor_key()); }
+  static compiler::Node* GenerateFastPath(
+      CodeStubAssembler* assembler,
+      compiler::CodeAssembler::Label* call_runtime, compiler::Node* closure,
+      compiler::Node* literals_index, compiler::Node* properties_count);
+
+  static bool IsSupported(ObjectLiteral* expr);
+  static int PropertiesCount(int literal_length);
+
+  int length() const { return LengthBits::decode(minor_key_); }
 
  private:
   class LengthBits : public BitField<int, 0, 4> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowObject);
-  DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowObject, HydrogenCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowObject, TurboFanCodeStub);
 };
 
 
@@ -1126,24 +1270,6 @@
   DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
 };
 
-class InstanceOfStub final : public PlatformCodeStub {
- public:
-  explicit InstanceOfStub(Isolate* isolate, bool es6_instanceof = false)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = IsES6InstanceOfBits::encode(es6_instanceof);
-  }
-
-  bool is_es6_instanceof() const {
-    return IsES6InstanceOfBits::decode(minor_key_);
-  }
-
- private:
-  class IsES6InstanceOfBits : public BitField<bool, 0, 1> {};
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(InstanceOf);
-  DEFINE_PLATFORM_CODE_STUB(InstanceOf, PlatformCodeStub);
-};
-
 
 enum AllocationSiteOverrideMode {
   DONT_OVERRIDE,
@@ -1280,7 +1406,6 @@
       : PlatformCodeStub(isolate) {}
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
   DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
@@ -1320,7 +1445,6 @@
 
  protected:
   Code::Kind kind() const override { return Code::LOAD_IC; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
  private:
   class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -1346,7 +1470,6 @@
 
  protected:
   Code::Kind kind() const override { return Code::LOAD_IC; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
  private:
   class ArrayBufferViewLoadFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -1362,7 +1485,6 @@
 
  protected:
   Code::Kind kind() const override { return Code::KEYED_LOAD_IC; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
  private:
   DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
@@ -1381,7 +1503,6 @@
 
  protected:
   Code::Kind kind() const override { return Code::KEYED_STORE_IC; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
  private:
   DEFINE_HANDLER_CODE_STUB(KeyedStoreSloppyArguments, HandlerStub);
@@ -1401,7 +1522,6 @@
 
  protected:
   Code::Kind kind() const override { return Code::LOAD_IC; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
  private:
   class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
@@ -1409,6 +1529,33 @@
   DEFINE_HANDLER_CODE_STUB(LoadConstant, HandlerStub);
 };
 
+class LoadApiGetterStub : public TurboFanCodeStub {
+ public:
+  LoadApiGetterStub(Isolate* isolate, bool receiver_is_holder, int index)
+      : TurboFanCodeStub(isolate) {
+    // For now the receiver is required to be the holder; otherwise we would
+    // need to ensure that the receiver is actually a JSReceiver.
+    // http://crbug.com/609134
+    DCHECK(receiver_is_holder);
+    minor_key_ = IndexBits::encode(index) |
+                 ReceiverIsHolderBits::encode(receiver_is_holder);
+  }
+
+  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
+  InlineCacheState GetICState() const override { return MONOMORPHIC; }
+
+  int index() const { return IndexBits::decode(minor_key_); }
+  bool receiver_is_holder() const {
+    return ReceiverIsHolderBits::decode(minor_key_);
+  }
+
+ private:
+  class ReceiverIsHolderBits : public BitField<bool, 0, 1> {};
+  class IndexBits : public BitField<int, 1, kDescriptorIndexBitCount> {};
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_TURBOFAN_CODE_STUB(LoadApiGetter, TurboFanCodeStub);
+};
 
 class StoreFieldStub : public HandlerStub {
  public:
@@ -1433,7 +1580,6 @@
 
  protected:
   Code::Kind kind() const override { return Code::STORE_IC; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
  private:
   class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -1543,7 +1689,6 @@
 
  protected:
   Code::Kind kind() const override { return Code::STORE_IC; }
-  Code::StubType GetStubType() const override { return Code::FAST; }
 
  private:
   class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -2492,13 +2637,11 @@
 
  private:
   static const int kContextIndexBits = 9;
-  static const int kSlotIndexBits = 13;
+  static const int kSlotIndexBits = 12;
   class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
   class SlotIndexBits
       : public BitField<int, kContextIndexBits, kSlotIndexBits> {};
 
-  Code::StubType GetStubType() const override { return Code::FAST; }
-
   DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, HandlerStub);
 };
 
@@ -2631,24 +2774,12 @@
       : TurboFanCodeStub(isolate) {}
 
   void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
-  void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+  void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
   DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
 };
 
-class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
- public:
-  explicit AllocateMutableHeapNumberStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
-
-  void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
-  void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
-  DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
-};
-
 #define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type)     \
   class Allocate##Type##Stub : public TurboFanCodeStub {                \
    public:                                                              \
@@ -2656,8 +2787,7 @@
         : TurboFanCodeStub(isolate) {}                                  \
                                                                         \
     void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
-    void GenerateAssembly(                                              \
-        compiler::CodeStubAssembler* assembler) const override;         \
+    void GenerateAssembly(CodeStubAssembler* assembler) const override; \
                                                                         \
     DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type);                   \
     DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub);                 \
@@ -2665,17 +2795,6 @@
 SIMD128_TYPES(SIMD128_ALLOC_STUB)
 #undef SIMD128_ALLOC_STUB
 
-class AllocateInNewSpaceStub final : public HydrogenCodeStub {
- public:
-  explicit AllocateInNewSpaceStub(Isolate* isolate)
-      : HydrogenCodeStub(isolate) {}
-
- private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateInNewSpace);
-  DEFINE_HYDROGEN_CODE_STUB(AllocateInNewSpace, HydrogenCodeStub);
-};
-
-
 class ArrayConstructorStubBase : public HydrogenCodeStub {
  public:
   ArrayConstructorStubBase(Isolate* isolate,
@@ -2720,26 +2839,77 @@
   DEFINE_CODE_STUB_BASE(ArrayConstructorStubBase, HydrogenCodeStub);
 };
 
+class CommonArrayConstructorStub : public TurboFanCodeStub {
+ protected:
+  CommonArrayConstructorStub(Isolate* isolate, ElementsKind kind,
+                             AllocationSiteOverrideMode override_mode)
+      : TurboFanCodeStub(isolate) {
+    // It only makes sense to override local allocation site behavior
+    // if there is a difference between the global allocation site policy
+    // for an ElementsKind and the desired usage of the stub.
+    DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
+           AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
+    set_sub_minor_key(ElementsKindBits::encode(kind) |
+                      AllocationSiteOverrideModeBits::encode(override_mode));
+  }
 
-class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
+  void set_sub_minor_key(uint32_t key) { minor_key_ = key; }
+
+  uint32_t sub_minor_key() const { return minor_key_; }
+
+  CommonArrayConstructorStub(uint32_t key, Isolate* isolate)
+      : TurboFanCodeStub(key, isolate) {}
+
  public:
-  ArrayNoArgumentConstructorStub(
-      Isolate* isolate,
-      ElementsKind kind,
-      AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
-      : ArrayConstructorStubBase(isolate, kind, override_mode) {
+  ElementsKind elements_kind() const {
+    return ElementsKindBits::decode(sub_minor_key());
+  }
+
+  AllocationSiteOverrideMode override_mode() const {
+    return AllocationSiteOverrideModeBits::decode(sub_minor_key());
   }
 
  private:
-  void PrintName(std::ostream& os) const override {  // NOLINT
-    BasePrintName(os, "ArrayNoArgumentConstructorStub");
-  }
+  // Ensure data fits within available bits.
+  STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructorConstantArgCount);
-  DEFINE_HYDROGEN_CODE_STUB(ArrayNoArgumentConstructor,
-                            ArrayConstructorStubBase);
+  class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
+  class AllocationSiteOverrideModeBits
+      : public BitField<AllocationSiteOverrideMode, 8, 1> {};  // NOLINT
 };
 
+class ArrayNoArgumentConstructorStub : public CommonArrayConstructorStub {
+ public:
+  ArrayNoArgumentConstructorStub(
+      Isolate* isolate, ElementsKind kind,
+      AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
+      : CommonArrayConstructorStub(isolate, kind, override_mode) {}
+
+ private:
+  void PrintName(std::ostream& os) const override {  // NOLINT
+    os << "ArrayNoArgumentConstructorStub";
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNoArgumentConstructor);
+  DEFINE_TURBOFAN_CODE_STUB(ArrayNoArgumentConstructor,
+                            CommonArrayConstructorStub);
+};
+
+class InternalArrayNoArgumentConstructorStub
+    : public CommonArrayConstructorStub {
+ public:
+  InternalArrayNoArgumentConstructorStub(Isolate* isolate, ElementsKind kind)
+      : CommonArrayConstructorStub(isolate, kind, DONT_OVERRIDE) {}
+
+ private:
+  void PrintName(std::ostream& os) const override {  // NOLINT
+    os << "InternalArrayNoArgumentConstructorStub";
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNoArgumentConstructor);
+  DEFINE_TURBOFAN_CODE_STUB(InternalArrayNoArgumentConstructor,
+                            CommonArrayConstructorStub);
+};
 
 class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
  public:
@@ -2804,19 +2974,6 @@
 };
 
 
-class InternalArrayNoArgumentConstructorStub : public
-    InternalArrayConstructorStubBase {
- public:
-  InternalArrayNoArgumentConstructorStub(Isolate* isolate,
-                                         ElementsKind kind)
-      : InternalArrayConstructorStubBase(isolate, kind) { }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructorConstantArgCount);
-  DEFINE_HYDROGEN_CODE_STUB(InternalArrayNoArgumentConstructor,
-                            InternalArrayConstructorStubBase);
-};
-
-
 class InternalArraySingleArgumentConstructorStub : public
     InternalArrayConstructorStubBase {
  public:
diff --git a/src/codegen.cc b/src/codegen.cc
index 692fa64..93ae307 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -162,16 +162,15 @@
         info->parse_info() && (code->kind() == Code::OPTIMIZED_FUNCTION ||
                                code->kind() == Code::FUNCTION);
     if (print_source) {
-      FunctionLiteral* literal = info->literal();
+      Handle<SharedFunctionInfo> shared = info->shared_info();
       Handle<Script> script = info->script();
       if (!script->IsUndefined() && !script->source()->IsUndefined()) {
         os << "--- Raw source ---\n";
         StringCharacterStream stream(String::cast(script->source()),
-                                     literal->start_position());
+                                     shared->start_position());
         // shared->end_position() points to the last character in the stream.
         // We need to compensate by adding one to calculate the length.
-        int source_len =
-            literal->end_position() - literal->start_position() + 1;
+        int source_len = shared->end_position() - shared->start_position() + 1;
         for (int i = 0; i < source_len; i++) {
           if (stream.HasMore()) {
             os << AsReversiblyEscapedUC16(stream.GetNext());
@@ -191,8 +190,8 @@
       os << "--- Code ---\n";
     }
     if (print_source) {
-      FunctionLiteral* literal = info->literal();
-      os << "source_position = " << literal->start_position() << "\n";
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      os << "source_position = " << shared->start_position() << "\n";
     }
     code->Disassemble(debug_name.get(), os);
     os << "--- End code ---\n";
diff --git a/src/compilation-statistics.cc b/src/compilation-statistics.cc
index ed568cb..c7e15b2 100644
--- a/src/compilation-statistics.cc
+++ b/src/compilation-statistics.cc
@@ -66,9 +66,8 @@
   double size_percent =
       static_cast<double>(stats.total_allocated_bytes_ * 100) /
       static_cast<double>(total_stats.total_allocated_bytes_);
-  base::OS::SNPrintF(buffer, kBufferSize,
-                     "%28s %10.3f (%5.1f%%)  "
-                     "%10u (%5.1f%%) %10u %10u",
+  base::OS::SNPrintF(buffer, kBufferSize, "%28s %10.3f (%5.1f%%)  %10" PRIuS
+                                          " (%5.1f%%) %10" PRIuS " %10" PRIuS,
                      name, ms, percent, stats.total_allocated_bytes_,
                      size_percent, stats.max_allocated_bytes_,
                      stats.absolute_max_allocated_bytes_);
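
The format-string change above is needed because the allocation counters are
size_t values: printing them with %u is undefined behavior wherever unsigned
int and size_t differ in width (e.g. LP64 Linux). The PRIuS macro expands to
the right conversion per platform; on a conforming toolchain the standard
%zu does the same job, as in this sketch:

#include <cstddef>
#include <cstdio>

int main() {
  std::size_t total_allocated_bytes = std::size_t{1} << 20;
  // %zu is the standard length modifier for size_t; %u would be undefined
  // behavior on platforms where unsigned int is narrower than size_t.
  std::printf("%10zu bytes\n", total_allocated_bytes);
}
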
diff --git a/src/compiler.cc b/src/compiler.cc
index 8bb5332..d649950 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -15,13 +15,11 @@
 #include "src/compilation-cache.h"
 #include "src/compiler/pipeline.h"
 #include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/typing.h"
 #include "src/debug/debug.h"
 #include "src/debug/liveedit.h"
 #include "src/deoptimizer.h"
+#include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
-#include "src/gdb-jit.h"
 #include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
 #include "src/log-inl.h"
@@ -32,6 +30,7 @@
 #include "src/profiler/cpu-profiler.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/code-serializer.h"
+#include "src/typing-asm.h"
 #include "src/vm-state-inl.h"
 
 namespace v8 {
@@ -52,15 +51,10 @@
 
 
 PARSE_INFO_GETTER(Handle<Script>, script)
-PARSE_INFO_GETTER(bool, is_eval)
-PARSE_INFO_GETTER(bool, is_native)
-PARSE_INFO_GETTER(bool, is_module)
 PARSE_INFO_GETTER(FunctionLiteral*, literal)
-PARSE_INFO_GETTER_WITH_DEFAULT(LanguageMode, language_mode, STRICT)
-PARSE_INFO_GETTER_WITH_DEFAULT(Handle<JSFunction>, closure,
-                               Handle<JSFunction>::null())
 PARSE_INFO_GETTER_WITH_DEFAULT(Scope*, scope, nullptr)
-PARSE_INFO_GETTER(Handle<Context>, context)
+PARSE_INFO_GETTER_WITH_DEFAULT(Handle<Context>, context,
+                               Handle<Context>::null())
 PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
 
 #undef PARSE_INFO_GETTER
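
The PARSE_INFO_GETTER macros invoked above (defined earlier in compiler.cc) expand into forwarding getters on CompilationInfo that fall back to a default when no ParseInfo is attached. A simplified sketch of what one WITH_DEFAULT expansion plausibly looks like, under the assumption that it guards on a null parse_info_:

#include <cassert>

struct ParseInfo {
  int scope_depth() const { return 3; }  // illustrative member, not V8's
};

class CompilationInfoSketch {
 public:
  explicit CompilationInfoSketch(ParseInfo* parse_info)
      : parse_info_(parse_info) {}

  // PARSE_INFO_GETTER_WITH_DEFAULT(int, scope_depth, 0) would expand to
  // roughly this forwarding getter:
  int scope_depth() const {
    return parse_info_ ? parse_info_->scope_depth() : 0;
  }

 private:
  ParseInfo* parse_info_;
};

int main() {
  ParseInfo pi;
  assert(CompilationInfoSketch(&pi).scope_depth() == 3);
  assert(CompilationInfoSketch(nullptr).scope_depth() == 0);
  return 0;
}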
@@ -80,26 +74,17 @@
   CompilationInfo* info_;
 };
 
-// Exactly like a CompilationInfo, except being allocated via {new} and it also
-// creates and enters a Zone on construction and deallocates it on destruction.
-class CompilationInfoWithZone : public CompilationInfo {
- public:
-  explicit CompilationInfoWithZone(Handle<JSFunction> function)
-      : CompilationInfo(new ParseInfo(&zone_, function)),
-        zone_(function->GetIsolate()->allocator()) {}
-
-  // Virtual destructor because a CompilationInfoWithZone has to exit the
-  // zone scope and get rid of dependent maps even when the destructor is
-  // called when cast as a CompilationInfo.
-  virtual ~CompilationInfoWithZone() {
-    DisableFutureOptimization();
-    dependencies()->Rollback();
-    delete parse_info_;
-    parse_info_ = nullptr;
+// Helper that times a scoped region and records the elapsed time.
+struct ScopedTimer {
+  explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
+    DCHECK(location_ != NULL);
+    timer_.Start();
   }
 
- private:
-  Zone zone_;
+  ~ScopedTimer() { *location_ += timer_.Elapsed(); }
+
+  base::ElapsedTimer timer_;
+  base::TimeDelta* location_;
 };
 
 // ----------------------------------------------------------------------------
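
The new ScopedTimer accumulates a region's wall time into a caller-owned total when it goes out of scope, so repeated regions sum into one counter. A standalone analog using std::chrono in place of base::ElapsedTimer and base::TimeDelta:

#include <chrono>
#include <cstdio>

// Standalone analog; V8's version uses base::ElapsedTimer/base::TimeDelta.
struct ScopedTimerSketch {
  explicit ScopedTimerSketch(std::chrono::nanoseconds* location)
      : location_(location), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimerSketch() {
    *location_ += std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now() - start_);
  }
  std::chrono::nanoseconds* location_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  std::chrono::nanoseconds total(0);
  for (int run = 0; run < 3; run++) {
    ScopedTimerSketch t(&total);  // each run adds to the same total
    volatile long sink = 0;
    for (long i = 0; i < 1000000; i++) sink += i;
  }
  std::printf("three runs took %lld ns\n",
              static_cast<long long>(total.count()));
  return 0;
}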
@@ -109,25 +94,12 @@
   return parse_info_ && !parse_info_->shared_info().is_null();
 }
 
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+                                 Handle<JSFunction> closure)
+    : CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
+                      parse_info->isolate(), parse_info->zone()) {
+  closure_ = closure;
 
-bool CompilationInfo::has_context() const {
-  return parse_info_ && !parse_info_->context().is_null();
-}
-
-
-bool CompilationInfo::has_literal() const {
-  return parse_info_ && parse_info_->literal() != nullptr;
-}
-
-
-bool CompilationInfo::has_scope() const {
-  return parse_info_ && parse_info_->scope() != nullptr;
-}
-
-
-CompilationInfo::CompilationInfo(ParseInfo* parse_info)
-    : CompilationInfo(parse_info, nullptr, Code::ComputeFlags(Code::FUNCTION),
-                      BASE, parse_info->isolate(), parse_info->zone()) {
   // Compiling for the snapshot typically results in different code than
   // compiling later on. This means that code recompiled with deoptimization
   // support won't be "equivalent" (as defined by SharedFunctionInfo::
@@ -140,19 +112,15 @@
   if (FLAG_turbo_inlining) MarkAsInliningEnabled();
   if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
   if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
-  if (FLAG_turbo_types) MarkAsTypingEnabled();
-
-  if (has_shared_info()) {
-    if (shared_info()->never_compiled()) MarkAsFirstCompile();
-  }
 }
 
-
-CompilationInfo::CompilationInfo(const char* debug_name, Isolate* isolate,
-                                 Zone* zone, Code::Flags code_flags)
+CompilationInfo::CompilationInfo(Vector<const char> debug_name,
+                                 Isolate* isolate, Zone* zone,
+                                 Code::Flags code_flags)
     : CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
 
-CompilationInfo::CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+                                 Vector<const char> debug_name,
                                  Code::Flags code_flags, Mode mode,
                                  Isolate* isolate, Zone* zone)
     : parse_info_(parse_info),
@@ -168,26 +136,20 @@
       prologue_offset_(Code::kPrologueOffsetNotSet),
       track_positions_(FLAG_hydrogen_track_positions ||
                        isolate->cpu_profiler()->is_profiling()),
-      opt_count_(has_shared_info() ? shared_info()->opt_count() : 0),
       parameter_count_(0),
       optimization_id_(-1),
       osr_expr_stack_height_(0),
       debug_name_(debug_name) {}
 
-
 CompilationInfo::~CompilationInfo() {
   DisableFutureOptimization();
+  dependencies()->Rollback();
   delete deferred_handles_;
-#ifdef DEBUG
-  // Check that no dependent maps have been added or added dependent maps have
-  // been rolled back or committed.
-  DCHECK(dependencies()->IsEmpty());
-#endif  // DEBUG
 }
 
 
 int CompilationInfo::num_parameters() const {
-  return has_scope() ? scope()->num_parameters() : parameter_count_;
+  return !IsStub() ? scope()->num_parameters() : parameter_count_;
 }
 
 
@@ -199,11 +161,6 @@
 bool CompilationInfo::is_this_defined() const { return !IsStub(); }
 
 
-int CompilationInfo::num_heap_slots() const {
-  return has_scope() ? scope()->num_heap_slots() : 0;
-}
-
-
 // Primitive functions are unlikely to be picked up by the stack-walking
 // profiler, so they trigger their own optimization when they're called
 // for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
@@ -212,7 +169,7 @@
          !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
          !literal()->dont_optimize() &&
          literal()->scope()->AllowsLazyCompilation() &&
-         (!has_shared_info() || !shared_info()->optimization_disabled());
+         !shared_info()->optimization_disabled();
 }
 
 
@@ -221,59 +178,6 @@
 }
 
 
-int CompilationInfo::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                                          SourcePosition position,
-                                          int parent_id) {
-  DCHECK(track_positions_);
-
-  int inline_id = static_cast<int>(inlined_function_infos_.size());
-  InlinedFunctionInfo info(parent_id, position, UnboundScript::kNoScriptId,
-      shared->start_position());
-  if (!shared->script()->IsUndefined()) {
-    Handle<Script> script(Script::cast(shared->script()));
-    info.script_id = script->id();
-
-    if (FLAG_hydrogen_track_positions && !script->source()->IsUndefined()) {
-      CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
-      OFStream os(tracing_scope.file());
-      os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
-         << ") id{" << optimization_id() << "," << inline_id << "} ---\n";
-      {
-        DisallowHeapAllocation no_allocation;
-        int start = shared->start_position();
-        int len = shared->end_position() - start;
-        String::SubStringRange source(String::cast(script->source()), start,
-                                      len);
-        for (const auto& c : source) {
-          os << AsReversiblyEscapedUC16(c);
-        }
-      }
-
-      os << "\n--- END ---\n";
-    }
-  }
-
-  inlined_function_infos_.push_back(info);
-
-  if (FLAG_hydrogen_track_positions && inline_id != 0) {
-    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
-    OFStream os(tracing_scope.file());
-    os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
-       << optimization_id() << "," << inline_id << "} AS " << inline_id
-       << " AT " << position << std::endl;
-  }
-
-  return inline_id;
-}
-
-
-void CompilationInfo::LogDeoptCallPosition(int pc_offset, int inlining_id) {
-  if (!track_positions_ || IsStub()) return;
-  DCHECK_LT(static_cast<size_t>(inlining_id), inlined_function_infos_.size());
-  inlined_function_infos_.at(inlining_id).deopt_pc_offsets.push_back(pc_offset);
-}
-
-
 base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
   if (parse_info() && parse_info()->literal()) {
     AllowHandleDereference allow_deref;
@@ -282,10 +186,11 @@
   if (parse_info() && !parse_info()->shared_info().is_null()) {
     return parse_info()->shared_info()->DebugName()->ToCString();
   }
-  const char* str = debug_name_ ? debug_name_ : "unknown";
-  size_t len = strlen(str) + 1;
-  base::SmartArrayPointer<char> name(new char[len]);
-  memcpy(name.get(), str, len);
+  Vector<const char> name_vec = debug_name_;
+  if (name_vec.is_empty()) name_vec = ArrayVector("unknown");
+  base::SmartArrayPointer<char> name(new char[name_vec.length() + 1]);
+  memcpy(name.get(), name_vec.start(), name_vec.length());
+  name[name_vec.length()] = '\0';
   return name;
 }
 
@@ -308,8 +213,15 @@
   }
 }
 
+int CompilationInfo::GetDeclareGlobalsFlags() const {
+  DCHECK(DeclareGlobalsLanguageMode::is_valid(parse_info()->language_mode()));
+  return DeclareGlobalsEvalFlag::encode(parse_info()->is_eval()) |
+         DeclareGlobalsNativeFlag::encode(parse_info()->is_native()) |
+         DeclareGlobalsLanguageMode::encode(parse_info()->language_mode());
+}
+
 bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
-  return is_sloppy(language_mode()) && !is_native();
+  return is_sloppy(parse_info()->language_mode()) && !parse_info()->is_native();
 }
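
GetDeclareGlobalsFlags above packs several small values into a single integer via V8's BitField templates, so the runtime can pass them as one Smi. A self-contained sketch of that encode/decode scheme; the field widths and positions here are illustrative, not the real DeclareGlobals layout:

#include <cassert>

// Minimal BitField in the spirit of V8's utils.h.
template <class T, int kShift, int kSize>
struct BitField {
  static const unsigned kMask = ((1u << kSize) - 1u) << kShift;
  static bool is_valid(T value) {
    return (static_cast<unsigned>(value) & ~((1u << kSize) - 1u)) == 0;
  }
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << kShift;
  }
  static T decode(unsigned packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

typedef BitField<bool, 0, 1> EvalFlag;
typedef BitField<bool, 1, 1> NativeFlag;
typedef BitField<int, 2, 2> LanguageModeBits;

int main() {
  assert(LanguageModeBits::is_valid(1));
  unsigned flags = EvalFlag::encode(true) | NativeFlag::encode(false) |
                   LanguageModeBits::encode(1);
  assert(EvalFlag::decode(flags));
  assert(!NativeFlag::decode(flags));
  assert(LanguageModeBits::decode(flags) == 1);
  return 0;
}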
 
 #if DEBUG
@@ -320,250 +232,47 @@
 #endif
 
 // ----------------------------------------------------------------------------
-// Implementation of OptimizedCompileJob
+// Implementation of CompilationJob
 
-class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
- public:
-  explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
-      : HOptimizedGraphBuilder(info) {
-  }
-
-#define DEF_VISIT(type)                                      \
-  void Visit##type(type* node) override {                    \
-    SourcePosition old_position = SourcePosition::Unknown(); \
-    if (node->position() != RelocInfo::kNoPosition) {        \
-      old_position = source_position();                      \
-      SetSourcePosition(node->position());                   \
-    }                                                        \
-    HOptimizedGraphBuilder::Visit##type(node);               \
-    if (!old_position.IsUnknown()) {                         \
-      set_source_position(old_position);                     \
-    }                                                        \
-  }
-  EXPRESSION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-#define DEF_VISIT(type)                                      \
-  void Visit##type(type* node) override {                    \
-    SourcePosition old_position = SourcePosition::Unknown(); \
-    if (node->position() != RelocInfo::kNoPosition) {        \
-      old_position = source_position();                      \
-      SetSourcePosition(node->position());                   \
-    }                                                        \
-    HOptimizedGraphBuilder::Visit##type(node);               \
-    if (!old_position.IsUnknown()) {                         \
-      set_source_position(old_position);                     \
-    }                                                        \
-  }
-  STATEMENT_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-#define DEF_VISIT(type)                        \
-  void Visit##type(type* node) override {      \
-    HOptimizedGraphBuilder::Visit##type(node); \
-  }
-  DECLARATION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-};
-
-
-OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
+CompilationJob::Status CompilationJob::CreateGraph() {
+  DisallowJavascriptExecution no_js(isolate());
   DCHECK(info()->IsOptimizing());
 
-  // Do not use Crankshaft/TurboFan if we need to be able to set break points.
-  if (info()->shared_info()->HasDebugInfo()) {
-    return AbortOptimization(kFunctionBeingDebugged);
-  }
-
-  // Resuming a suspended frame is not supported by Crankshaft/TurboFan.
-  if (info()->shared_info()->HasBuiltinFunctionId() &&
-      (info()->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
-       info()->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
-       info()->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
-    return AbortOptimization(kGeneratorResumeMethod);
-  }
-
-  // Limit the number of times we try to optimize functions.
-  const int kMaxOptCount =
-      FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
-  if (info()->opt_count() > kMaxOptCount) {
-    return AbortOptimization(kOptimizedTooManyTimes);
-  }
-
-  // Check the whitelist for Crankshaft.
-  if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
-    return AbortOptimization(kHydrogenFilter);
-  }
-
-  // Optimization requires a version of fullcode with deoptimization support.
-  // Recompile the unoptimized version of the code if the current version
-  // doesn't have deoptimization support already.
-  // Otherwise, if we are gathering compilation time and space statistics
-  // for hydrogen, gather baseline statistics for a fullcode compilation.
-  bool should_recompile = !info()->shared_info()->has_deoptimization_support();
-  if (should_recompile || FLAG_hydrogen_stats) {
-    base::ElapsedTimer timer;
-    if (FLAG_hydrogen_stats) {
-      timer.Start();
-    }
-    if (!Compiler::EnsureDeoptimizationSupport(info())) {
-      return SetLastStatus(FAILED);
-    }
-    if (FLAG_hydrogen_stats) {
-      isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
-    }
-  }
-
-  DCHECK(info()->shared_info()->has_deoptimization_support());
-  DCHECK(!info()->is_first_compile());
-
-  bool optimization_disabled = info()->shared_info()->optimization_disabled();
-  bool dont_crankshaft = info()->shared_info()->dont_crankshaft();
-
-  // Check the enabling conditions for Turbofan.
-  // 1. "use asm" code.
-  bool is_turbofanable_asm = FLAG_turbo_asm &&
-                             info()->shared_info()->asm_function() &&
-                             !optimization_disabled;
-
-  // 2. Fallback for features unsupported by Crankshaft.
-  bool is_unsupported_by_crankshaft_but_turbofanable =
-      dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
-      !optimization_disabled;
-
-  // 3. Explicitly enabled by the command-line filter.
-  bool passes_turbo_filter =
-      info()->shared_info()->PassesFilter(FLAG_turbo_filter);
-
-  // If this is OSR request, OSR must be enabled by Turbofan.
-  bool passes_osr_test = FLAG_turbo_osr || !info()->is_osr();
-
-  if ((is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
-       passes_turbo_filter) &&
-      passes_osr_test) {
-    // Use TurboFan for the compilation.
-    if (FLAG_trace_opt) {
-      OFStream os(stdout);
-      os << "[compiling method " << Brief(*info()->closure())
-         << " using TurboFan";
-      if (info()->is_osr()) os << " OSR";
-      os << "]" << std::endl;
-    }
-
-    if (info()->shared_info()->asm_function()) {
-      if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
-      info()->MarkAsFunctionContextSpecializing();
-    } else {
-      if (!FLAG_always_opt) {
-        info()->MarkAsBailoutOnUninitialized();
-      }
-      if (FLAG_native_context_specialization) {
-        info()->MarkAsNativeContextSpecializing();
-        info()->MarkAsTypingEnabled();
-      }
-    }
-    if (!info()->shared_info()->asm_function() ||
-        FLAG_turbo_asm_deoptimization) {
-      info()->MarkAsDeoptimizationEnabled();
-    }
-
-    Timer t(this, &time_taken_to_create_graph_);
-    compiler::Pipeline pipeline(info());
-    pipeline.GenerateCode();
-    if (!info()->code().is_null()) {
-      return SetLastStatus(SUCCEEDED);
-    }
-  }
-
-  if (!isolate()->use_crankshaft() || dont_crankshaft) {
-    // Crankshaft is entirely disabled.
-    return SetLastStatus(FAILED);
-  }
-
-  Scope* scope = info()->scope();
-  if (LUnallocated::TooManyParameters(scope->num_parameters())) {
-    // Crankshaft would require too many Lithium operands.
-    return AbortOptimization(kTooManyParameters);
-  }
-
-  if (info()->is_osr() &&
-      LUnallocated::TooManyParametersOrStackSlots(scope->num_parameters(),
-                                                  scope->num_stack_slots())) {
-    // Crankshaft would require too many Lithium operands.
-    return AbortOptimization(kTooManyParametersLocals);
-  }
-
   if (FLAG_trace_opt) {
     OFStream os(stdout);
-    os << "[compiling method " << Brief(*info()->closure())
-       << " using Crankshaft";
+    os << "[compiling method " << Brief(*info()->closure()) << " using "
+       << compiler_name_;
     if (info()->is_osr()) os << " OSR";
     os << "]" << std::endl;
   }
 
-  if (FLAG_trace_hydrogen) {
-    isolate()->GetHTracer()->TraceCompilation(info());
-  }
-
-  // Type-check the function.
-  AstTyper(info()->isolate(), info()->zone(), info()->closure(),
-           info()->scope(), info()->osr_ast_id(), info()->literal())
-      .Run();
-
-  // Optimization could have been disabled by the parser. Note that this check
-  // is only needed because the Hydrogen graph builder is missing some bailouts.
-  if (info()->shared_info()->optimization_disabled()) {
-    return AbortOptimization(
-        info()->shared_info()->disable_optimization_reason());
-  }
-
-  HOptimizedGraphBuilder* graph_builder =
-      (info()->is_tracking_positions() || FLAG_trace_ic)
-          ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
-          : new (info()->zone()) HOptimizedGraphBuilder(info());
-
-  Timer t(this, &time_taken_to_create_graph_);
-  graph_ = graph_builder->CreateGraph();
-
-  if (isolate()->has_pending_exception()) {
-    return SetLastStatus(FAILED);
-  }
-
-  if (graph_ == NULL) return SetLastStatus(BAILED_OUT);
-
-  if (info()->dependencies()->HasAborted()) {
-    // Dependency has changed during graph creation. Let's try again later.
-    return RetryOptimization(kBailedOutDueToDependencyChange);
-  }
-
-  return SetLastStatus(SUCCEEDED);
+  // Delegate to the underlying implementation.
+  DCHECK_EQ(SUCCEEDED, last_status());
+  ScopedTimer t(&time_taken_to_create_graph_);
+  return SetLastStatus(CreateGraphImpl());
 }
 
-
-OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
+CompilationJob::Status CompilationJob::OptimizeGraph() {
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;
   DisallowCodeDependencyChange no_dependency_change;
 
-  DCHECK(last_status() == SUCCEEDED);
-  // TODO(turbofan): Currently everything is done in the first phase.
-  if (!info()->code().is_null()) {
-    return last_status();
-  }
+  // Delegate to the underlying implementation.
+  DCHECK_EQ(SUCCEEDED, last_status());
+  ScopedTimer t(&time_taken_to_optimize_);
+  return SetLastStatus(OptimizeGraphImpl());
+}
 
-  Timer t(this, &time_taken_to_optimize_);
-  DCHECK(graph_ != NULL);
-  BailoutReason bailout_reason = kNoReason;
+CompilationJob::Status CompilationJob::GenerateCode() {
+  DisallowCodeDependencyChange no_dependency_change;
+  DisallowJavascriptExecution no_js(isolate());
+  DCHECK(!info()->dependencies()->HasAborted());
 
-  if (graph_->Optimize(&bailout_reason)) {
-    chunk_ = LChunk::NewChunk(graph_);
-    if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
-  } else if (bailout_reason != kNoReason) {
-    info_->AbortOptimization(bailout_reason);
-  }
-
-  return SetLastStatus(BAILED_OUT);
+  // Delegate to the underlying implementation.
+  DCHECK_EQ(SUCCEEDED, last_status());
+  ScopedTimer t(&time_taken_to_codegen_);
+  return SetLastStatus(GenerateCodeImpl());
 }
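
With this hunk, CompilationJob becomes a template-method base class: each public phase asserts the previous phase succeeded, times itself with ScopedTimer, and delegates to a virtual *Impl hook that the Crankshaft and TurboFan backends implement separately. A stripped-down standalone sketch of that shape (timing and V8 scopes omitted; ToyJob is hypothetical):

#include <cstdio>

class Job {
 public:
  enum Status { FAILED, SUCCEEDED };
  virtual ~Job() {}

  // Public phases delegate to the backend-specific hooks.
  Status CreateGraph() { return SetLastStatus(CreateGraphImpl()); }
  Status OptimizeGraph() { return SetLastStatus(OptimizeGraphImpl()); }
  Status GenerateCode() { return SetLastStatus(GenerateCodeImpl()); }
  Status last_status() const { return last_status_; }

 protected:
  virtual Status CreateGraphImpl() = 0;
  virtual Status OptimizeGraphImpl() = 0;
  virtual Status GenerateCodeImpl() = 0;

 private:
  Status SetLastStatus(Status s) { last_status_ = s; return s; }
  Status last_status_ = SUCCEEDED;
};

// Hypothetical backend standing in for HCompilationJob / the TurboFan job.
class ToyJob : public Job {
 protected:
  Status CreateGraphImpl() override { return SUCCEEDED; }
  Status OptimizeGraphImpl() override { return SUCCEEDED; }
  Status GenerateCodeImpl() override { return SUCCEEDED; }
};

int main() {
  ToyJob job;
  if (job.CreateGraph() == Job::SUCCEEDED &&
      job.OptimizeGraph() == Job::SUCCEEDED &&
      job.GenerateCode() == Job::SUCCEEDED) {
    std::printf("compiled\n");
  }
  return 0;
}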
 
 
@@ -578,8 +287,9 @@
   heap->AddWeakObjectToCodeDependency(object, dep);
 }
 
+}  // namespace
 
-void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
+void CompilationJob::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
   // TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
   Isolate* const isolate = code->GetIsolate();
   DCHECK(code->is_optimized_code());
@@ -619,53 +329,7 @@
   code->set_can_have_weak_objects(true);
 }
 
-}  // namespace
-
-
-OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
-  DCHECK(last_status() == SUCCEEDED);
-  // TODO(turbofan): Currently everything is done in the first phase.
-  if (!info()->code().is_null()) {
-    info()->dependencies()->Commit(info()->code());
-    if (info()->is_deoptimization_enabled()) {
-      info()->parse_info()->context()->native_context()->AddOptimizedCode(
-          *info()->code());
-      RegisterWeakObjectsInOptimizedCode(info()->code());
-    }
-    RecordOptimizationStats();
-    return last_status();
-  }
-
-  DCHECK(!info()->dependencies()->HasAborted());
-  DisallowCodeDependencyChange no_dependency_change;
-  DisallowJavascriptExecution no_js(isolate());
-  {  // Scope for timer.
-    Timer timer(this, &time_taken_to_codegen_);
-    DCHECK(chunk_ != NULL);
-    DCHECK(graph_ != NULL);
-    // Deferred handles reference objects that were accessible during
-    // graph creation.  To make sure that we don't encounter inconsistencies
-    // between graph creation and code generation, we disallow accessing
-    // objects through deferred handles during the latter, with exceptions.
-    DisallowDeferredHandleDereference no_deferred_handle_deref;
-    Handle<Code> optimized_code = chunk_->Codegen();
-    if (optimized_code.is_null()) {
-      if (info()->bailout_reason() == kNoReason) {
-        return AbortOptimization(kCodeGenerationFailed);
-      }
-      return SetLastStatus(BAILED_OUT);
-    }
-    RegisterWeakObjectsInOptimizedCode(optimized_code);
-    info()->SetCode(optimized_code);
-  }
-  RecordOptimizationStats();
-  // Add to the weak list of optimized code objects.
-  info()->context()->native_context()->AddOptimizedCode(*info()->code());
-  return SetLastStatus(SUCCEEDED);
-}
-
-
-void OptimizedCompileJob::RecordOptimizationStats() {
+void CompilationJob::RecordOptimizationStats() {
   Handle<JSFunction> function = info()->closure();
   if (!function->IsOptimized()) {
     // Concurrent recompilation and OSR may race.  Increment only once.
@@ -706,47 +370,25 @@
 
 namespace {
 
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
-                                          int estimate) {
-  // If no properties are added in the constructor, they are more likely
-  // to be added later.
-  if (estimate == 0) estimate = 2;
-
-  // TODO(yangguo): check whether those heuristics are still up-to-date.
-  // We do not shrink objects that go into a snapshot (yet), so we adjust
-  // the estimate conservatively.
-  if (shared->GetIsolate()->serializer_enabled()) {
-    estimate += 2;
-  } else {
-    // Inobject slack tracking will reclaim redundant inobject space later,
-    // so we can afford to adjust the estimate generously.
-    estimate += 8;
-  }
-
-  shared->set_expected_nof_properties(estimate);
-}
-
-void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
-                              BailoutReason bailout_reason) {
-  if (bailout_reason != kNoReason) {
-    shared_info->DisableOptimization(bailout_reason);
-  }
+bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
+  return shared->is_toplevel() && shared->script()->IsScript() &&
+         Script::cast(shared->script())->compilation_type() ==
+             Script::COMPILATION_TYPE_EVAL;
 }
 
 void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
-                               CompilationInfo* info,
-                               Handle<SharedFunctionInfo> shared) {
-  // SharedFunctionInfo is passed separately, because if CompilationInfo
-  // was created using Script object, it will not have it.
-
+                               CompilationInfo* info) {
   // Log the code generation. If source information is available include
   // script name and line number. Check explicitly whether logging is
   // enabled as finding the line number is not free.
   if (info->isolate()->logger()->is_logging_code_events() ||
       info->isolate()->cpu_profiler()->is_profiling()) {
+    Handle<SharedFunctionInfo> shared = info->shared_info();
     Handle<Script> script = info->parse_info()->script();
-    Handle<AbstractCode> abstract_code = info->abstract_code();
+    Handle<AbstractCode> abstract_code =
+        info->has_bytecode_array()
+            ? Handle<AbstractCode>::cast(info->bytecode_array())
+            : Handle<AbstractCode>::cast(info->code());
     if (abstract_code.is_identical_to(
             info->isolate()->builtins()->CompileLazy())) {
       return;
@@ -759,13 +401,13 @@
                               : info->isolate()->heap()->empty_string();
     Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
     PROFILE(info->isolate(),
-            CodeCreateEvent(log_tag, *abstract_code, *shared, info, script_name,
+            CodeCreateEvent(log_tag, *abstract_code, *shared, script_name,
                             line_num, column_num));
   }
 }
 
 void EnsureFeedbackVector(CompilationInfo* info) {
-  if (!info->has_shared_info()) return;
+  DCHECK(info->has_shared_info());
 
   // If no type feedback vector exists, we create one now. At this point the
   // AstNumbering pass has already run. Note the snapshot can contain outdated
@@ -786,28 +428,9 @@
       info->literal()->feedback_vector_spec()));
 }
 
-bool CompileUnoptimizedCode(CompilationInfo* info) {
-  DCHECK(AllowCompilation::IsAllowed(info->isolate()));
-  if (!Compiler::Analyze(info->parse_info()) ||
-      !(EnsureFeedbackVector(info), FullCodeGenerator::MakeCode(info))) {
-    Isolate* isolate = info->isolate();
-    if (!isolate->has_pending_exception()) isolate->StackOverflow();
-    return false;
-  }
-  return true;
-}
-
 bool UseIgnition(CompilationInfo* info) {
-  // TODO(4681): Generator functions are not yet supported.
-  if (info->shared_info()->is_generator()) {
-    return false;
-  }
-
-  // TODO(4681): Resuming a suspended frame is not supported.
-  if (info->shared_info()->HasBuiltinFunctionId() &&
-      (info->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
-       info->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
-       info->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
+  if (info->is_debug()) return false;
+  if (info->shared_info()->is_resumable() && !FLAG_ignition_generators) {
     return false;
   }
 
@@ -839,9 +462,20 @@
   return size;
 }
 
-bool GenerateBaselineCode(CompilationInfo* info) {
+bool GenerateUnoptimizedCode(CompilationInfo* info) {
   bool success;
   EnsureFeedbackVector(info);
+  if (FLAG_validate_asm && info->scope()->asm_module()) {
+    AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
+                   info->literal());
+    if (FLAG_enable_simd_asmjs) {
+      typer.set_allow_simd(true);
+    }
+    if (!typer.Validate()) {
+      DCHECK(!info->isolate()->has_pending_exception());
+      PrintF("Validation of asm.js module failed: %s", typer.error_message());
+    }
+  }
   if (FLAG_ignition && UseIgnition(info)) {
     success = interpreter::Interpreter::MakeBytecode(info);
   } else {
@@ -850,15 +484,17 @@
   if (success) {
     Isolate* isolate = info->isolate();
     Counters* counters = isolate->counters();
+    // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
     counters->total_baseline_code_size()->Increment(CodeAndMetadataSize(info));
     counters->total_baseline_compile_count()->Increment(1);
   }
   return success;
 }
 
-bool CompileBaselineCode(CompilationInfo* info) {
+bool CompileUnoptimizedCode(CompilationInfo* info) {
   DCHECK(AllowCompilation::IsAllowed(info->isolate()));
-  if (!Compiler::Analyze(info->parse_info()) || !GenerateBaselineCode(info)) {
+  if (!Compiler::Analyze(info->parse_info()) ||
+      !GenerateUnoptimizedCode(info)) {
     Isolate* isolate = info->isolate();
     if (!isolate->has_pending_exception()) isolate->StackOverflow();
     return false;
@@ -866,44 +502,45 @@
   return true;
 }
 
-void InstallBaselineCompilationResult(CompilationInfo* info,
-                                      Handle<SharedFunctionInfo> shared,
-                                      Handle<ScopeInfo> scope_info) {
+void InstallSharedScopeInfo(CompilationInfo* info,
+                            Handle<SharedFunctionInfo> shared) {
+  Handle<ScopeInfo> scope_info =
+      ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
+  shared->set_scope_info(*scope_info);
+}
+
+void InstallSharedCompilationResult(CompilationInfo* info,
+                                    Handle<SharedFunctionInfo> shared) {
   // Assert that we are not overwriting (possibly patched) debug code.
-  DCHECK(!shared->HasDebugCode());
+  DCHECK(!shared->HasDebugInfo());
   DCHECK(!info->code().is_null());
   shared->ReplaceCode(*info->code());
-  shared->set_scope_info(*scope_info);
   if (info->has_bytecode_array()) {
     DCHECK(!shared->HasBytecodeArray());  // Only compiled once.
     shared->set_bytecode_array(*info->bytecode_array());
   }
 }
 
-MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCodeCommon(
-    CompilationInfo* info) {
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
   VMState<COMPILER> state(info->isolate());
   PostponeInterruptsScope postpone(info->isolate());
 
   // Parse and update CompilationInfo with the results.
   if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
   Handle<SharedFunctionInfo> shared = info->shared_info();
-  FunctionLiteral* lit = info->literal();
-  DCHECK_EQ(shared->language_mode(), lit->language_mode());
-  SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
-  MaybeDisableOptimization(shared, lit->dont_optimize_reason());
+  DCHECK_EQ(shared->language_mode(), info->literal()->language_mode());
 
   // Compile either unoptimized code or bytecode for the interpreter.
-  if (!CompileBaselineCode(info)) return MaybeHandle<Code>();
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+  if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
 
-  // Update the shared function info with the scope info. Allocating the
-  // ScopeInfo object may cause a GC.
-  Handle<ScopeInfo> scope_info =
-      ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
+  // Update the shared function info with the scope info.
+  InstallSharedScopeInfo(info, shared);
 
   // Install compilation result on the shared function info
-  InstallBaselineCompilationResult(info, shared, scope_info);
+  InstallSharedCompilationResult(info, shared);
+
+  // Record the function compilation event.
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info);
 
   return info->code();
 }
@@ -964,7 +601,9 @@
   if (!shared_info.is_null()) {
     FunctionLiteral* lit = parse_info->literal();
     shared_info->set_ast_node_count(lit->ast_node_count());
-    MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
+    if (lit->dont_optimize_reason() != kNoReason) {
+      shared_info->DisableOptimization(lit->dont_optimize_reason());
+    }
     shared_info->set_dont_crankshaft(
         shared_info->dont_crankshaft() ||
         (lit->flags() & AstProperties::kDontCrankshaft));
@@ -972,21 +611,42 @@
   return true;
 }
 
-bool GetOptimizedCodeNow(CompilationInfo* info) {
-  Isolate* isolate = info->isolate();
-  CanonicalHandleScope canonical(isolate);
-  TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
-  TRACE_EVENT0("v8", "V8.OptimizeCode");
+bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
+  bool optimization_disabled = shared->optimization_disabled();
+  bool dont_crankshaft = shared->dont_crankshaft();
 
-  if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+  // Check the enabling conditions for Turbofan.
+  // 1. "use asm" code.
+  bool is_turbofanable_asm =
+      FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
+
+  // 2. Fallback for features unsupported by Crankshaft.
+  bool is_unsupported_by_crankshaft_but_turbofanable =
+      dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
+      !optimization_disabled;
+
+  // 3. Explicitly enabled by the command-line filter.
+  bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
+
+  return is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
+         passes_turbo_filter;
+}
+
+bool GetOptimizedCodeNow(CompilationJob* job) {
+  CompilationInfo* info = job->info();
+  Isolate* isolate = info->isolate();
+
+  // Parsing is not required when optimizing from existing bytecode.
+  if (!info->is_optimizing_from_bytecode()) {
+    if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+  }
 
   TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
   TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
-  OptimizedCompileJob job(info);
-  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
-      job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED ||
-      job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
+  if (job->CreateGraph() != CompilationJob::SUCCEEDED ||
+      job->OptimizeGraph() != CompilationJob::SUCCEEDED ||
+      job->GenerateCode() != CompilationJob::SUCCEEDED) {
     if (FLAG_trace_opt) {
       PrintF("[aborted optimizing ");
       info->closure()->ShortPrint();
@@ -996,18 +656,16 @@
   }
 
   // Success!
+  job->RecordOptimizationStats();
   DCHECK(!isolate->has_pending_exception());
   InsertCodeIntoOptimizedCodeMap(info);
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
-                            info->shared_info());
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info);
   return true;
 }
 
-bool GetOptimizedCodeLater(CompilationInfo* info) {
+bool GetOptimizedCodeLater(CompilationJob* job) {
+  CompilationInfo* info = job->info();
   Isolate* isolate = info->isolate();
-  CanonicalHandleScope canonical(isolate);
-  TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
-  TRACE_EVENT0("v8", "V8.OptimizeCode");
 
   if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
     if (FLAG_trace_concurrent_recompilation) {
@@ -1018,8 +676,14 @@
     return false;
   }
 
+  // All handles below this point will be allocated in a deferred handle scope
+  // that is detached and handed off to the background thread when we return.
   CompilationHandleScope handle_scope(info);
-  if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+
+  // Parsing is not required when optimizing from existing bytecode.
+  if (!info->is_optimizing_from_bytecode()) {
+    if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+  }
 
   // Reopen handles in the new CompilationHandleScope.
   info->ReopenHandlesInNewHandleScope();
@@ -1028,45 +692,23 @@
   TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
   TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
-  OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
-  OptimizedCompileJob::Status status = job->CreateGraph();
-  if (status != OptimizedCompileJob::SUCCEEDED) return false;
+  if (job->CreateGraph() != CompilationJob::SUCCEEDED) return false;
   isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
 
   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Queued ");
     info->closure()->ShortPrint();
-    if (info->is_osr()) {
-      PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
-    } else {
-      PrintF(" for concurrent optimization.\n");
-    }
+    PrintF(" for concurrent optimization.\n");
   }
   return true;
 }
 
-MaybeHandle<Code> GetUnoptimizedCode(Handle<JSFunction> function) {
-  DCHECK(!function->GetIsolate()->has_pending_exception());
-  DCHECK(!function->is_compiled());
-  if (function->shared()->is_compiled()) {
-    return Handle<Code>(function->shared()->code());
-  }
-
-  CompilationInfoWithZone info(function);
-  Handle<Code> result;
-  ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
-                             GetUnoptimizedCodeCommon(&info),
-                             Code);
-  return result;
-}
-
 MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
                                    Compiler::ConcurrencyMode mode,
                                    BailoutId osr_ast_id = BailoutId::None(),
                                    JavaScriptFrame* osr_frame = nullptr) {
   Isolate* isolate = function->GetIsolate();
   Handle<SharedFunctionInfo> shared(function->shared(), isolate);
-  if (shared->HasDebugInfo()) return MaybeHandle<Code>();
 
   Handle<Code> cached_code;
   if (GetCodeFromOptimizedCodeMap(function, osr_ast_id)
@@ -1082,46 +724,186 @@
     return cached_code;
   }
 
-  DCHECK(AllowCompilation::IsAllowed(isolate));
-
+  // Reset profiler ticks, function is no longer considered hot.
   if (shared->is_compiled()) {
     shared->code()->set_profiler_ticks(0);
   }
 
-  // TODO(mstarzinger): We cannot properly deserialize a scope chain containing
-  // an eval scope and hence would fail at parsing the eval source again.
-  if (shared->disable_optimization_reason() == kEval) {
-    return MaybeHandle<Code>();
-  }
-
-  // TODO(mstarzinger): We cannot properly deserialize a scope chain for the
-  // builtin context, hence Genesis::InstallExperimentalNatives would fail.
-  if (shared->is_toplevel() && isolate->bootstrapper()->IsActive()) {
-    return MaybeHandle<Code>();
-  }
-
-  base::SmartPointer<CompilationInfo> info(
-      new CompilationInfoWithZone(function));
   VMState<COMPILER> state(isolate);
   DCHECK(!isolate->has_pending_exception());
   PostponeInterruptsScope postpone(isolate);
+  bool use_turbofan = UseTurboFan(shared);
+  base::SmartPointer<CompilationJob> job(
+      use_turbofan ? compiler::Pipeline::NewCompilationJob(function)
+                   : new HCompilationJob(function));
+  CompilationInfo* info = job->info();
+  ParseInfo* parse_info = info->parse_info();
 
-  info->SetOptimizingForOsr(osr_ast_id);
+  info->SetOptimizingForOsr(osr_ast_id, osr_frame);
+
+  // Do not use Crankshaft/TurboFan if we need to be able to set break points.
+  if (info->shared_info()->HasDebugInfo()) {
+    info->AbortOptimization(kFunctionBeingDebugged);
+    return MaybeHandle<Code>();
+  }
+
+  // Limit the number of times we try to optimize functions.
+  const int kMaxOptCount =
+      FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
+  if (info->shared_info()->opt_count() > kMaxOptCount) {
+    info->AbortOptimization(kOptimizedTooManyTimes);
+    return MaybeHandle<Code>();
+  }
+
+  CanonicalHandleScope canonical(isolate);
+  TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+  TRACE_EVENT0("v8", "V8.OptimizeCode");
+
+  // TurboFan can optimize directly from existing bytecode.
+  if (FLAG_turbo_from_bytecode && use_turbofan &&
+      info->shared_info()->HasBytecodeArray()) {
+    info->MarkAsOptimizeFromBytecode();
+  }
+
+  if (IsEvalToplevel(shared)) {
+    parse_info->set_eval();
+    if (function->context()->IsNativeContext()) parse_info->set_global();
+    parse_info->set_toplevel();
+    parse_info->set_allow_lazy_parsing(false);
+    parse_info->set_lazy(false);
+  }
 
   if (mode == Compiler::CONCURRENT) {
-    if (GetOptimizedCodeLater(info.get())) {
-      info.Detach();  // The background recompile job owns this now.
+    if (GetOptimizedCodeLater(job.get())) {
+      job.Detach();   // The background recompile job owns this now.
       return isolate->builtins()->InOptimizationQueue();
     }
   } else {
-    info->set_osr_frame(osr_frame);
-    if (GetOptimizedCodeNow(info.get())) return info->code();
+    if (GetOptimizedCodeNow(job.get())) return info->code();
   }
 
   if (isolate->has_pending_exception()) isolate->clear_pending_exception();
   return MaybeHandle<Code>();
 }
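
In the CONCURRENT branch above, the caller hands the job to the dispatcher and calls job.Detach() so the SmartPointer stops owning it; the background thread deletes it instead. A std::unique_ptr analog of that ownership handoff (stand-in types, not the real dispatcher API):

#include <memory>
#include <vector>

struct Job {};  // stand-in for a CompilationJob

std::vector<Job*> dispatcher_queue;  // stand-in for the compile dispatcher

void QueueForOptimization(std::unique_ptr<Job> job) {
  dispatcher_queue.push_back(job.release());  // the queue owns it now
}

int main() {
  std::unique_ptr<Job> job(new Job());
  QueueForOptimization(std::move(job));  // analog of job.Detach()
  // 'job' is now null; the dispatcher frees the raw pointer when done.
  delete dispatcher_queue.back();
  return 0;
}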
 
+class InterpreterActivationsFinder : public ThreadVisitor,
+                                     public OptimizedFunctionVisitor {
+ public:
+  SharedFunctionInfo* shared_;
+  bool has_activations_;
+
+  explicit InterpreterActivationsFinder(SharedFunctionInfo* shared)
+      : shared_(shared), has_activations_(false) {}
+
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    JavaScriptFrameIterator it(isolate, top);
+    for (; !it.done() && !has_activations_; it.Advance()) {
+      JavaScriptFrame* frame = it.frame();
+      if (!frame->is_interpreted()) continue;
+      if (frame->function()->shared() == shared_) has_activations_ = true;
+    }
+  }
+
+  void VisitFunction(JSFunction* function) {
+    if (function->Inlines(shared_)) has_activations_ = true;
+  }
+
+  void EnterContext(Context* context) {}
+  void LeaveContext(Context* context) {}
+};
+
+bool HasInterpreterActivations(Isolate* isolate, SharedFunctionInfo* shared) {
+  InterpreterActivationsFinder activations_finder(shared);
+  activations_finder.VisitThread(isolate, isolate->thread_local_top());
+  isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
+  if (FLAG_turbo_from_bytecode) {
+    // If we are able to optimize functions directly from bytecode, then there
+    // might be optimized functions that rely on bytecode being around. We need
+    // to prevent switching the given function to baseline code in those cases.
+    Deoptimizer::VisitAllOptimizedFunctions(isolate, &activations_finder);
+  }
+  return activations_finder.has_activations_;
+}
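
InterpreterActivationsFinder above is a double-duty visitor: it walks the JavaScript frames of every thread and, when optimizing from bytecode is enabled, every optimized function, looking for live uses of one SharedFunctionInfo. A toy standalone version of just the frame scan:

#include <vector>

// Toy frame record; V8 walks real JavaScriptFrames across all threads.
struct Frame {
  int function_id;
  bool is_interpreted;
};

bool HasInterpreterActivationsSketch(const std::vector<Frame>& stack,
                                     int shared_id) {
  for (const Frame& frame : stack) {
    if (frame.is_interpreted && frame.function_id == shared_id) return true;
  }
  return false;
}

int main() {
  std::vector<Frame> stack = {{1, false}, {2, true}, {3, false}};
  return HasInterpreterActivationsSketch(stack, 2) ? 0 : 1;
}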
+
+MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
+  Isolate* isolate = function->GetIsolate();
+  VMState<COMPILER> state(isolate);
+  PostponeInterruptsScope postpone(isolate);
+  Zone zone(isolate->allocator());
+  ParseInfo parse_info(&zone, function);
+  CompilationInfo info(&parse_info, function);
+
+  // Reset profiler ticks, function is no longer considered hot.
+  if (function->shared()->HasBytecodeArray()) {
+    function->shared()->set_profiler_ticks(0);
+  }
+
+  // Nothing left to do if the function already has baseline code.
+  if (function->shared()->code()->kind() == Code::FUNCTION) {
+    return Handle<Code>(function->shared()->code());
+  }
+
+  // We do not switch to baseline code when the debugger might have created a
+  // copy of the bytecode with break slots to be able to set break points.
+  if (function->shared()->HasDebugInfo()) {
+    return MaybeHandle<Code>();
+  }
+
+  // TODO(4280): For now we do not switch generators to baseline code because
+  // there might be suspended activations stored in generator objects on the
+  // heap. We could eventually go directly to TurboFan in this case.
+  if (function->shared()->is_generator()) {
+    return MaybeHandle<Code>();
+  }
+
+  // TODO(4280): For now we disable switching to baseline code in the presence
+  // of interpreter activations of the given function. The reasons are:
+  //  1) The debugger assumes each function is either full-code or bytecode.
+  //  2) The underlying bytecode is cleared below, breaking stack unwinding.
+  if (HasInterpreterActivations(isolate, function->shared())) {
+    if (FLAG_trace_opt) {
+      OFStream os(stdout);
+      os << "[unable to switch " << Brief(*function) << " due to activations]"
+         << std::endl;
+    }
+    return MaybeHandle<Code>();
+  }
+
+  if (FLAG_trace_opt) {
+    OFStream os(stdout);
+    os << "[switching method " << Brief(*function) << " to baseline code]"
+       << std::endl;
+  }
+
+  // Parse and update CompilationInfo with the results.
+  if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
+  Handle<SharedFunctionInfo> shared = info.shared_info();
+  DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
+
+  // Compile baseline code using the full code generator.
+  if (!Compiler::Analyze(info.parse_info()) ||
+      !FullCodeGenerator::MakeCode(&info)) {
+    if (!isolate->has_pending_exception()) isolate->StackOverflow();
+    return MaybeHandle<Code>();
+  }
+
+  // TODO(4280): For now we play it safe and remove the bytecode array when we
+  // switch to baseline code. We might consider keeping around the bytecode so
+  // that it can be used as the "source of truth" eventually.
+  shared->ClearBytecodeArray();
+
+  // Update the shared function info with the scope info.
+  InstallSharedScopeInfo(&info, shared);
+
+  // Install compilation result on the shared function info
+  InstallSharedCompilationResult(&info, shared);
+
+  // Record the function compilation event.
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &info);
+
+  return info.code();
+}
+
 MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
   Isolate* isolate = function->GetIsolate();
   DCHECK(!isolate->has_pending_exception());
@@ -1129,35 +911,30 @@
   TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
   TRACE_EVENT0("v8", "V8.CompileCode");
   AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
-  // If the debugger is active, do not compile with turbofan unless we can
-  // deopt from turbofan code.
-  if (FLAG_turbo_asm && function->shared()->asm_function() &&
-      (FLAG_turbo_asm_deoptimization || !isolate->debug()->is_active()) &&
-      !FLAG_turbo_osr) {
-    CompilationInfoWithZone info(function);
 
-    VMState<COMPILER> state(isolate);
-    PostponeInterruptsScope postpone(isolate);
-
-    info.SetOptimizing();
-
-    if (GetOptimizedCodeNow(&info)) {
+  if (FLAG_turbo_cache_shared_code) {
+    Handle<Code> cached_code;
+    if (GetCodeFromOptimizedCodeMap(function, BailoutId::None())
+            .ToHandle(&cached_code)) {
+      if (FLAG_trace_opt) {
+        PrintF("[found optimized code for ");
+        function->ShortPrint();
+        PrintF(" during unoptimized compile]\n");
+      }
       DCHECK(function->shared()->is_compiled());
-      return info.code();
+      return cached_code;
     }
-    // We have failed compilation. If there was an exception clear it so that
-    // we can compile unoptimized code.
-    if (isolate->has_pending_exception()) isolate->clear_pending_exception();
   }
 
   if (function->shared()->is_compiled()) {
     return Handle<Code>(function->shared()->code());
   }
 
-  CompilationInfoWithZone info(function);
+  Zone zone(isolate->allocator());
+  ParseInfo parse_info(&zone, function);
+  CompilationInfo info(&parse_info, function);
   Handle<Code> result;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCodeCommon(&info),
-                             Code);
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCode(&info), Code);
 
   if (FLAG_always_opt) {
     Handle<Code> opt_code;
@@ -1171,59 +948,6 @@
 }
 
 
-bool CompileEvalForDebugging(Handle<JSFunction> function,
-                             Handle<SharedFunctionInfo> shared) {
-  Handle<Script> script(Script::cast(shared->script()));
-  Handle<Context> context(function->context());
-
-  Zone zone(function->GetIsolate()->allocator());
-  ParseInfo parse_info(&zone, script);
-  CompilationInfo info(&parse_info);
-  Isolate* isolate = info.isolate();
-
-  parse_info.set_eval();
-  parse_info.set_context(context);
-  if (context->IsNativeContext()) parse_info.set_global();
-  parse_info.set_toplevel();
-  parse_info.set_allow_lazy_parsing(false);
-  parse_info.set_language_mode(shared->language_mode());
-  parse_info.set_parse_restriction(NO_PARSE_RESTRICTION);
-  info.MarkAsDebug();
-
-  VMState<COMPILER> state(info.isolate());
-
-  if (!Parser::ParseStatic(&parse_info)) {
-    isolate->clear_pending_exception();
-    return false;
-  }
-
-  FunctionLiteral* lit = parse_info.literal();
-  LiveEditFunctionTracker live_edit_tracker(isolate, lit);
-
-  if (!CompileUnoptimizedCode(&info)) {
-    isolate->clear_pending_exception();
-    return false;
-  }
-  shared->ReplaceCode(*info.code());
-  return true;
-}
-
-
-bool CompileForDebugging(CompilationInfo* info) {
-  info->MarkAsDebug();
-  if (GetUnoptimizedCodeCommon(info).is_null()) {
-    info->isolate()->clear_pending_exception();
-    return false;
-  }
-  return true;
-}
-
-inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
-  return shared->is_toplevel() && shared->script()->IsScript() &&
-         Script::cast(shared->script())->compilation_type() ==
-             Script::COMPILATION_TYPE_EVAL;
-}
-
 Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
     Isolate* isolate, FunctionLiteral* literal, Handle<Script> script) {
   Handle<Code> code = isolate->builtins()->CompileLazy();
@@ -1294,61 +1018,55 @@
 
     DCHECK(!info->is_debug() || !parse_info->allow_lazy_parsing());
 
-    info->MarkAsFirstCompile();
-
     FunctionLiteral* lit = parse_info->literal();
-    LiveEditFunctionTracker live_edit_tracker(isolate, lit);
 
     // Measure how long it takes to do the compilation; only take the
     // rest of the function into account to avoid overlap with the
     // parsing statistics.
-    HistogramTimer* rate = info->is_eval()
-          ? info->isolate()->counters()->compile_eval()
-          : info->isolate()->counters()->compile();
+    RuntimeCallTimerScope runtimeTimer(
+        isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
+                                       : &RuntimeCallStats::Compile);
+    HistogramTimer* rate = parse_info->is_eval()
+                               ? info->isolate()->counters()->compile_eval()
+                               : info->isolate()->counters()->compile();
     HistogramTimerScope timer(rate);
-    TRACE_EVENT0("v8", info->is_eval() ? "V8.CompileEval" : "V8.Compile");
+    TRACE_EVENT0("v8", parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
 
     // Allocate a shared function info object.
     DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
     result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
     result->set_is_toplevel(true);
-    if (info->is_eval()) {
+    if (parse_info->is_eval()) {
       // Eval scripts cannot be (re-)compiled without context.
       result->set_allows_lazy_compilation_without_context(false);
     }
     parse_info->set_shared_info(result);
 
     // Compile the code.
-    if (!CompileBaselineCode(info)) {
+    if (!CompileUnoptimizedCode(info)) {
       return Handle<SharedFunctionInfo>::null();
     }
 
+    // Update the shared function info with the scope info.
+    InstallSharedScopeInfo(info, result);
+
     // Install compilation result on the shared function info
-    Handle<ScopeInfo> scope_info =
-        ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
-    InstallBaselineCompilationResult(info, result, scope_info);
+    InstallSharedCompilationResult(info, result);
 
     Handle<String> script_name =
         script->name()->IsString()
             ? Handle<String>(String::cast(script->name()))
             : isolate->factory()->empty_string();
-    Logger::LogEventsAndTags log_tag = info->is_eval()
-        ? Logger::EVAL_TAG
-        : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
+    Logger::LogEventsAndTags log_tag =
+        parse_info->is_eval()
+            ? Logger::EVAL_TAG
+            : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
 
-    PROFILE(isolate, CodeCreateEvent(log_tag, *info->abstract_code(), *result,
-                                     info, *script_name));
-
-    // Hint to the runtime system used when allocating space for initial
-    // property space by setting the expected number of properties for
-    // the instances of the function.
-    SetExpectedNofPropertiesFromEstimate(result,
-                                         lit->expected_property_count());
+    PROFILE(isolate, CodeCreateEvent(log_tag, result->abstract_code(), *result,
+                                     *script_name));
 
     if (!script.is_null())
       script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
-
-    live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
   }
 
   return result;
@@ -1370,90 +1088,205 @@
 
 bool Compiler::ParseAndAnalyze(ParseInfo* info) {
   if (!Parser::ParseStatic(info)) return false;
-  return Compiler::Analyze(info);
+  if (!Compiler::Analyze(info)) return false;
+  DCHECK_NOT_NULL(info->literal());
+  DCHECK_NOT_NULL(info->scope());
+  return true;
 }
 
 bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
   if (function->is_compiled()) return true;
-  MaybeHandle<Code> maybe_code = GetLazyCode(function);
+  Isolate* isolate = function->GetIsolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  // Start a compilation.
   Handle<Code> code;
-  if (!maybe_code.ToHandle(&code)) {
+  if (!GetLazyCode(function).ToHandle(&code)) {
     if (flag == CLEAR_EXCEPTION) {
-      function->GetIsolate()->clear_pending_exception();
+      isolate->clear_pending_exception();
     }
     return false;
   }
-  DCHECK(code->IsJavaScriptCode());
+
+  // Install code on closure.
   function->ReplaceCode(*code);
+
+  // Check postconditions on success.
+  DCHECK(!isolate->has_pending_exception());
+  DCHECK(function->shared()->is_compiled());
+  DCHECK(function->is_compiled());
+  return true;
+}
+
+bool Compiler::CompileBaseline(Handle<JSFunction> function) {
+  Isolate* isolate = function->GetIsolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  // Start a compilation.
+  Handle<Code> code;
+  if (!GetBaselineCode(function).ToHandle(&code)) {
+    // Baseline generation failed, get unoptimized code.
+    DCHECK(function->shared()->is_compiled());
+    code = handle(function->shared()->code());
+    isolate->clear_pending_exception();
+  }
+
+  // Install code on closure.
+  function->ReplaceCode(*code);
+
+  // Check postconditions on success.
+  DCHECK(!isolate->has_pending_exception());
+  DCHECK(function->shared()->is_compiled());
   DCHECK(function->is_compiled());
   return true;
 }
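
Note that CompileBaseline above never reports failure to its caller: if baseline generation bails out, it clears the pending exception and installs the code the shared function info already has. A small sketch of that try-preferred-tier-else-fallback shape (stand-in types for V8's MaybeHandle<Code> plumbing):

#include <cstdio>
#include <string>

// Stand-in for GetBaselineCode(); returns false when generation bails out.
bool GetBaselineCodeSketch(std::string* code) { return false; }

int main() {
  std::string code;
  if (!GetBaselineCodeSketch(&code)) {
    // Baseline generation failed; reuse code known to exist already.
    code = "existing unoptimized code";
  }
  std::printf("installed: %s\n", code.c_str());
  return 0;
}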
 
 bool Compiler::CompileOptimized(Handle<JSFunction> function,
                                 ConcurrencyMode mode) {
+  if (function->IsOptimized()) return true;
+  Isolate* isolate = function->GetIsolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  // Start a compilation.
   Handle<Code> code;
-  if (GetOptimizedCode(function, mode).ToHandle(&code)) {
-    // Optimization succeeded, return optimized code.
-    function->ReplaceCode(*code);
-  } else {
+  if (!GetOptimizedCode(function, mode).ToHandle(&code)) {
     // Optimization failed, get unoptimized code.
-    Isolate* isolate = function->GetIsolate();
-    if (isolate->has_pending_exception()) {  // Possible stack overflow.
-      return false;
-    }
-    code = Handle<Code>(function->shared()->code(), isolate);
-    if (code->kind() != Code::FUNCTION &&
-        code->kind() != Code::OPTIMIZED_FUNCTION) {
-      if (!GetUnoptimizedCode(function).ToHandle(&code)) {
+    DCHECK(!isolate->has_pending_exception());
+    if (function->shared()->is_compiled()) {
+      code = handle(function->shared()->code(), isolate);
+    } else {
+      Zone zone(isolate->allocator());
+      ParseInfo parse_info(&zone, function);
+      CompilationInfo info(&parse_info, function);
+      if (!GetUnoptimizedCode(&info).ToHandle(&code)) {
         return false;
       }
     }
-    function->ReplaceCode(*code);
   }
 
-  DCHECK(function->code()->kind() == Code::FUNCTION ||
-         function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
-         (function->code()->is_interpreter_entry_trampoline() &&
-          function->shared()->HasBytecodeArray()) ||
-         function->IsInOptimizationQueue());
+  // Install code on closure.
+  function->ReplaceCode(*code);
+
+  // Check postconditions on success.
+  DCHECK(!isolate->has_pending_exception());
+  DCHECK(function->shared()->is_compiled());
+  DCHECK(function->is_compiled());
   return true;
 }
 
 bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
-  Handle<SharedFunctionInfo> shared(function->shared());
-  if (IsEvalToplevel(shared)) {
-    return CompileEvalForDebugging(function, shared);
-  } else {
-    CompilationInfoWithZone info(function);
-    return CompileForDebugging(&info);
+  Isolate* isolate = function->GetIsolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  // Start a compilation.
+  Zone zone(isolate->allocator());
+  ParseInfo parse_info(&zone, function);
+  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+  if (IsEvalToplevel(handle(function->shared()))) {
+    parse_info.set_eval();
+    if (function->context()->IsNativeContext()) parse_info.set_global();
+    parse_info.set_toplevel();
+    parse_info.set_allow_lazy_parsing(false);
+    parse_info.set_lazy(false);
   }
+  info.MarkAsDebug();
+  if (GetUnoptimizedCode(&info).is_null()) {
+    isolate->clear_pending_exception();
+    return false;
+  }
+
+  // Check postconditions on success.
+  DCHECK(!isolate->has_pending_exception());
+  DCHECK(function->shared()->is_compiled());
+  DCHECK(function->shared()->HasDebugCode());
+  return true;
 }
 
 bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
+  Isolate* isolate = shared->GetIsolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  // Start a compilation.
+  Zone zone(isolate->allocator());
+  ParseInfo parse_info(&zone, shared);
+  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
   DCHECK(shared->allows_lazy_compilation_without_context());
   DCHECK(!IsEvalToplevel(shared));
-  Zone zone(shared->GetIsolate()->allocator());
-  ParseInfo parse_info(&zone, shared);
-  CompilationInfo info(&parse_info);
-  return CompileForDebugging(&info);
+  info.MarkAsDebug();
+  if (GetUnoptimizedCode(&info).is_null()) {
+    isolate->clear_pending_exception();
+    return false;
+  }
+
+  // Check postconditions on success.
+  DCHECK(!isolate->has_pending_exception());
+  DCHECK(shared->is_compiled());
+  DCHECK(shared->HasDebugCode());
+  return true;
+}
+
+MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
+  Isolate* isolate = script->GetIsolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  // In order to ensure that live edit function info collection finds the newly
+  // generated shared function infos, clear the script's list temporarily
+  // and restore it at the end of this method.
+  Handle<Object> old_function_infos(script->shared_function_infos(), isolate);
+  script->set_shared_function_infos(Smi::FromInt(0));
+
+  // Start a compilation.
+  Zone zone(isolate->allocator());
+  ParseInfo parse_info(&zone, script);
+  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+  parse_info.set_global();
+  info.MarkAsDebug();
+
+  // TODO(635): support extensions.
+  const bool compilation_succeeded = !CompileToplevel(&info).is_null();
+  Handle<JSArray> infos;
+  if (compilation_succeeded) {
+    // Check postconditions on success.
+    DCHECK(!isolate->has_pending_exception());
+    infos = LiveEditFunctionTracker::Collect(parse_info.literal(), script,
+                                             &zone, isolate);
+  }
+
+  // Restore the original function info list in order to remain side-effect
+  // free as much as possible, since some code expects the old shared function
+  // infos to stick around.
+  script->set_shared_function_infos(*old_function_infos);
+
+  return infos;
 }
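
CompileForLiveEdit above saves the shared function info list, clears it, and
restores it by hand at the end of the method. The same save/restore expressed
as an RAII guard, a sketch only (the guard type is hypothetical and not part
of V8, but it uses exactly the accessors shown above):

    // Hypothetical scope guard: clears the script's list on entry and
    // restores the saved value when the scope exits, even on early return.
    class ScopedFunctionInfosReset {
     public:
      ScopedFunctionInfosReset(Handle<Script> script, Isolate* isolate)
          : script_(script),
            saved_(script->shared_function_infos(), isolate) {
        script_->set_shared_function_infos(Smi::FromInt(0));
      }
      ~ScopedFunctionInfosReset() {
        script_->set_shared_function_infos(*saved_);
      }

     private:
      Handle<Script> script_;
      Handle<Object> saved_;
    };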
 
 // TODO(turbofan): In the future, unoptimized code with deopt support could
 // be generated lazily once deopt is triggered.
 bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
   DCHECK_NOT_NULL(info->literal());
-  DCHECK(info->has_scope());
+  DCHECK_NOT_NULL(info->scope());
   Handle<SharedFunctionInfo> shared = info->shared_info();
   if (!shared->has_deoptimization_support()) {
-    // TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
-    CompilationInfoWithZone unoptimized(info->closure());
-    // Note that we use the same AST that we will use for generating the
-    // optimized code.
-    ParseInfo* parse_info = unoptimized.parse_info();
-    parse_info->set_literal(info->literal());
-    parse_info->set_scope(info->scope());
-    parse_info->set_context(info->context());
+    Zone zone(info->isolate()->allocator());
+    CompilationInfo unoptimized(info->parse_info(), info->closure());
     unoptimized.EnableDeoptimizationSupport();
+
+    // TODO(4280): For now we do not switch generators to baseline code because
+    // there might be suspended activations stored in generator objects on the
+    // heap. We could eventually go directly to TurboFan in this case.
+    if (shared->is_generator()) return false;
+
+    // TODO(4280): For now we disable switching to baseline code in the presence
+    // of interpreter activations of the given function. The reasons are:
+    //  1) The debugger assumes each function is either full-code or bytecode.
+    //  2) The underlying bytecode is cleared below, breaking stack unwinding.
+    // The expensive check for activations only needs to be done when the given
+    // function has bytecode, otherwise we can be sure there are no activations.
+    if (shared->HasBytecodeArray() &&
+        HasInterpreterActivations(info->isolate(), *shared)) {
+      return false;
+    }
+
     // If the current code has reloc info for serialization, also include
     // reloc info for serialization for the new code, so that deopt support
     // can be added without losing IC state.
@@ -1464,48 +1297,32 @@
     EnsureFeedbackVector(&unoptimized);
     if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
 
-    shared->EnableDeoptimizationSupport(*unoptimized.code());
-
-    info->MarkAsCompiled();
+    // TODO(4280): For now we play it safe and remove the bytecode array when we
+    // switch to baseline code. We might consider keeping around the bytecode so
+    // that it can be used as the "source of truth" eventually.
+    shared->ClearBytecodeArray();
 
     // The scope info might not have been set if a lazily compiled
     // function is inlined before being called for the first time.
     if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
-      Handle<ScopeInfo> target_scope_info =
-          ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
-      shared->set_scope_info(*target_scope_info);
+      InstallSharedScopeInfo(info, shared);
     }
 
+    // Install compilation result on the shared function info
+    shared->EnableDeoptimizationSupport(*unoptimized.code());
+
     // The existing unoptimized code was replaced with the new one.
-    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized);
   }
   return true;
 }
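
The two early bail-outs above encode the conditions under which switching a
function to baseline code is legal. Condensed into one predicate, a sketch
only (V8 performs these checks inline rather than through a helper like this):

    bool CanSwitchToBaseline(Isolate* isolate, SharedFunctionInfo* shared) {
      // Suspended generator activations live on the heap and still point at
      // the bytecode, so generators stay on the interpreter for now.
      if (shared->is_generator()) return false;
      // The bytecode array is cleared on the switch; live interpreter frames
      // for this function would then fail to unwind.
      if (shared->HasBytecodeArray() &&
          HasInterpreterActivations(isolate, shared)) {
        return false;
      }
      return true;
    }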
 
-void Compiler::CompileForLiveEdit(Handle<Script> script) {
-  // TODO(635): support extensions.
-  Zone zone(script->GetIsolate()->allocator());
-  ParseInfo parse_info(&zone, script);
-  CompilationInfo info(&parse_info);
-  PostponeInterruptsScope postpone(info.isolate());
-  VMState<COMPILER> state(info.isolate());
-
-  // Get rid of old list of shared function infos.
-  info.MarkAsFirstCompile();
-  info.MarkAsDebug();
-  info.parse_info()->set_global();
-  if (!Parser::ParseStatic(info.parse_info())) return;
-
-  LiveEditFunctionTracker tracker(info.isolate(), parse_info.literal());
-  if (!CompileUnoptimizedCode(&info)) return;
-  tracker.RecordRootFunctionInfo(info.code());
-}
-
 MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
     Handle<String> source, Handle<SharedFunctionInfo> outer_info,
     Handle<Context> context, LanguageMode language_mode,
-    ParseRestriction restriction, int line_offset, int column_offset,
-    Handle<Object> script_name, ScriptOriginOptions options) {
+    ParseRestriction restriction, int eval_scope_position, int eval_position,
+    int line_offset, int column_offset, Handle<Object> script_name,
+    ScriptOriginOptions options) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_eval_size()->Increment(source_length);
@@ -1514,7 +1331,7 @@
   CompilationCache* compilation_cache = isolate->compilation_cache();
   MaybeHandle<SharedFunctionInfo> maybe_shared_info =
       compilation_cache->LookupEval(source, outer_info, context, language_mode,
-                                    line_offset);
+                                    eval_scope_position);
   Handle<SharedFunctionInfo> shared_info;
 
   Handle<Script> script;
@@ -1526,33 +1343,28 @@
       script->set_column_offset(column_offset);
     }
     script->set_origin_options(options);
+    script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
+    Script::SetEvalOrigin(script, outer_info, eval_position);
+
     Zone zone(isolate->allocator());
     ParseInfo parse_info(&zone, script);
-    CompilationInfo info(&parse_info);
+    CompilationInfo info(&parse_info, Handle<JSFunction>::null());
     parse_info.set_eval();
     if (context->IsNativeContext()) parse_info.set_global();
     parse_info.set_language_mode(language_mode);
     parse_info.set_parse_restriction(restriction);
     parse_info.set_context(context);
 
-    Debug::RecordEvalCaller(script);
-
     shared_info = CompileToplevel(&info);
 
     if (shared_info.is_null()) {
       return MaybeHandle<JSFunction>();
     } else {
-      // Explicitly disable optimization for eval code. We're not yet prepared
-      // to handle eval-code in the optimizing compiler.
-      if (restriction != ONLY_SINGLE_FUNCTION_LITERAL) {
-        shared_info->DisableOptimization(kEval);
-      }
-
       // If caller is strict mode, the result must be in strict mode as well.
       DCHECK(is_sloppy(language_mode) ||
              is_strict(shared_info->language_mode()));
       compilation_cache->PutEval(source, outer_info, context, shared_info,
-                                 line_offset);
+                                 eval_scope_position);
     }
   }
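
Both the lookup (LookupEval) and the insertion (PutEval) above now key the
compilation cache on eval_scope_position instead of line_offset: two eval
calls with identical source text can resolve variables differently depending
on where in the enclosing scope they occur, so they must not share a cached
SharedFunctionInfo. A conceptual sketch of the key, assuming a plain struct
(V8's CompilationCacheEval keeps these components in its own hash table rather
than in a struct like this):

    struct EvalCacheKey {
      Handle<String> source;                  // the eval'd source text
      Handle<SharedFunctionInfo> outer_info;  // the calling function
      LanguageMode language_mode;             // sloppy vs. strict
      int eval_scope_position;                // replaces line_offset as key
    };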
 
@@ -1627,8 +1439,10 @@
     timer.Start();
   }
 
-  if (!maybe_result.ToHandle(&result)) {
-    // No cache entry found. Compile the script.
+  if (!maybe_result.ToHandle(&result) ||
+      (FLAG_serialize_toplevel &&
+       compile_options == ScriptCompiler::kProduceCodeCache)) {
+    // No cache entry found, or embedder wants a code cache. Compile the script.
 
     // Create a script object describing the script to be compiled.
     Handle<Script> script = isolate->factory()->NewScript(source);
@@ -1652,7 +1466,7 @@
     // Compile the function and add it to the cache.
     Zone zone(isolate->allocator());
     ParseInfo parse_info(&zone, script);
-    CompilationInfo info(&parse_info);
+    CompilationInfo info(&parse_info, Handle<JSFunction>::null());
     if (is_module) {
       parse_info.set_module();
     } else {
@@ -1670,7 +1484,7 @@
     }
 
     parse_info.set_language_mode(
-        static_cast<LanguageMode>(info.language_mode() | language_mode));
+        static_cast<LanguageMode>(parse_info.language_mode() | language_mode));
     result = CompileToplevel(&info);
     if (extension == NULL && !result.is_null()) {
       compilation_cache->PutScript(source, context, language_mode, result);
@@ -1709,7 +1523,7 @@
   parse_info->set_language_mode(
       static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
 
-  CompilationInfo compile_info(parse_info);
+  CompilationInfo compile_info(parse_info, Handle<JSFunction>::null());
 
   // The source was parsed lazily, so compiling for debugging is not possible.
   DCHECK(!compile_info.is_debug());
@@ -1726,7 +1540,9 @@
   // Precondition: code has been parsed and scopes have been analyzed.
   Isolate* isolate = outer_info->isolate();
   MaybeHandle<SharedFunctionInfo> maybe_existing;
-  if (outer_info->is_first_compile()) {
+
+  // Find any previously allocated shared function info for the given literal.
+  if (outer_info->shared_info()->never_compiled()) {
     // On the first compile, there are no existing shared function infos for
     // inner functions yet, so do not try to find them. All bets are off for
     // live edit though.
@@ -1735,6 +1551,7 @@
   } else {
     maybe_existing = script->FindSharedFunctionInfo(literal);
   }
+
   // We found an existing shared function info. If it's already compiled,
   // don't worry about compiling it, and simply return it. If it's not yet
   // compiled, continue to decide whether to eagerly compile.
@@ -1742,6 +1559,7 @@
   // unless we already have code with debug break slots.
   Handle<SharedFunctionInfo> existing;
   if (maybe_existing.ToHandle(&existing) && existing->is_compiled()) {
+    DCHECK(!existing->is_toplevel());
     if (!outer_info->is_debug() || existing->HasDebugCode()) {
       return existing;
     }
@@ -1752,20 +1570,23 @@
   if (!maybe_existing.ToHandle(&result)) {
     result = NewSharedFunctionInfoForLiteral(isolate, literal, script);
     result->set_is_toplevel(false);
+
+    // If the outer function has been compiled before, we cannot be sure that
+    // shared function info for this function literal has been created for the
+    // first time. It may have already been compiled previously.
+    result->set_never_compiled(outer_info->shared_info()->never_compiled());
   }
 
   Zone zone(isolate->allocator());
   ParseInfo parse_info(&zone, script);
-  CompilationInfo info(&parse_info);
+  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
   parse_info.set_literal(literal);
   parse_info.set_shared_info(result);
   parse_info.set_scope(literal->scope());
   parse_info.set_language_mode(literal->scope()->language_mode());
   if (outer_info->will_serialize()) info.PrepareForSerializing();
-  if (outer_info->is_first_compile()) info.MarkAsFirstCompile();
   if (outer_info->is_debug()) info.MarkAsDebug();
 
-  LiveEditFunctionTracker live_edit_tracker(isolate, literal);
   // Determine if the function can be lazily compiled. This is necessary to
   // allow some of our builtin JS files to be lazily compiled. These
   // builtins cannot be handled lazily by the parser, since we have to know
@@ -1775,13 +1596,7 @@
   // aggressive about lazy compilation, because it might trigger compilation
   // of functions without an outer context when setting a breakpoint through
   // Debug::FindSharedFunctionInfoInScript.
-  bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
-  // Compile eagerly for live edit. When compiling debug code, eagerly compile
-  // unless we can lazily compile without the context.
-  bool allow_lazy = literal->AllowsLazyCompilation() &&
-                    !LiveEditFunctionTracker::IsActive(isolate) &&
-                    (!info.is_debug() || allow_lazy_without_ctx);
-
+  bool allow_lazy = literal->AllowsLazyCompilation() && !info.is_debug();
   bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
 
   // Consider compiling eagerly when targeting the code cache.
@@ -1796,37 +1611,24 @@
   TRACE_EVENT0("v8", "V8.CompileCode");
   if (lazy) {
     info.SetCode(isolate->builtins()->CompileLazy());
-  } else if (Renumber(info.parse_info()) && GenerateBaselineCode(&info)) {
+  } else if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
     // Code generation will ensure that the feedback vector is present and
     // appropriately sized.
     DCHECK(!info.code().is_null());
-    Handle<ScopeInfo> scope_info =
-        ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
     if (literal->should_eager_compile() &&
         literal->should_be_used_once_hint()) {
       info.code()->MarkToBeExecutedOnce(isolate);
     }
+    // Update the shared function info with the scope info.
+    InstallSharedScopeInfo(&info, result);
     // Install compilation result on the shared function info.
-    InstallBaselineCompilationResult(&info, result, scope_info);
+    InstallSharedCompilationResult(&info, result);
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
 
   if (maybe_existing.is_null()) {
-    // If the outer function has been compiled before, we cannot be sure that
-    // shared function info for this function literal has been created for the
-    // first time. It may have already been compiled previously.
-    result->set_never_compiled(outer_info->is_first_compile() && lazy);
-
-    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
-    result->set_allows_lazy_compilation(literal->AllowsLazyCompilation());
-    result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
-
-    // Set the expected number of properties for instances and return
-    // the resulting function.
-    SetExpectedNofPropertiesFromEstimate(result,
-                                         literal->expected_property_count());
-    live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
+    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info);
   }
 
   return result;
@@ -1872,10 +1674,10 @@
   return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
 }
 
-void Compiler::FinalizeOptimizedCompileJob(OptimizedCompileJob* job) {
-  // Take ownership of compilation info.  Deleting compilation info
-  // also tears down the zone and the recompile job.
-  base::SmartPointer<CompilationInfo> info(job->info());
+void Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
+  // Take ownership of compilation job.  Deleting job also tears down the zone.
+  base::SmartPointer<CompilationJob> job(raw_job);
+  CompilationInfo* info = job->info();
   Isolate* isolate = info->isolate();
 
   VMState<COMPILER> state(isolate);
@@ -1892,16 +1694,17 @@
   //    Except when OSR already disabled optimization for some reason.
   // 3) The code may have already been invalidated due to dependency change.
   // 4) Code generation may have failed.
-  if (job->last_status() == OptimizedCompileJob::SUCCEEDED) {
+  if (job->last_status() == CompilationJob::SUCCEEDED) {
     if (shared->optimization_disabled()) {
       job->RetryOptimization(kOptimizationDisabled);
     } else if (info->dependencies()->HasAborted()) {
       job->RetryOptimization(kBailedOutDueToDependencyChange);
-    } else if (job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
-      RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info.get(), shared);
+    } else if (job->GenerateCode() == CompilationJob::SUCCEEDED) {
+      job->RecordOptimizationStats();
+      RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info);
       if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
                                          info->osr_ast_id()).code == nullptr) {
-        InsertCodeIntoOptimizedCodeMap(info.get());
+        InsertCodeIntoOptimizedCodeMap(info);
       }
       if (FLAG_trace_opt) {
         PrintF("[completed optimizing ");
@@ -1913,7 +1716,7 @@
     }
   }
 
-  DCHECK(job->last_status() != OptimizedCompileJob::SUCCEEDED);
+  DCHECK(job->last_status() != CompilationJob::SUCCEEDED);
   if (FLAG_trace_opt) {
     PrintF("[aborted optimizing ");
     info->closure()->ShortPrint();
diff --git a/src/compiler.h b/src/compiler.h
index fa04399..64bc88d 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -17,8 +17,8 @@
 
 // Forward declarations.
 class CompilationInfo;
+class CompilationJob;
 class JavaScriptFrame;
-class OptimizedCompileJob;
 class ParseInfo;
 class ScriptData;
 
@@ -44,13 +44,14 @@
   // given function holds (except for live-edit, which compiles the world).
 
   static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
+  static bool CompileBaseline(Handle<JSFunction> function);
   static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
   static bool CompileDebugCode(Handle<JSFunction> function);
   static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
-  static void CompileForLiveEdit(Handle<Script> script);
+  static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
 
-  // Generate and install code from previously queued optimization job.
-  static void FinalizeOptimizedCompileJob(OptimizedCompileJob* job);
+  // Generate and install code from previously queued compilation job.
+  static void FinalizeCompilationJob(CompilationJob* job);
 
   // Give the compiler a chance to perform low-latency initialization tasks of
   // the given {function} on its instantiation. Note that only the runtime will
@@ -77,7 +78,8 @@
   MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
       Handle<String> source, Handle<SharedFunctionInfo> outer_info,
       Handle<Context> context, LanguageMode language_mode,
-      ParseRestriction restriction, int line_offset, int column_offset = 0,
+      ParseRestriction restriction, int eval_scope_position, int eval_position,
+      int line_offset = 0, int column_offset = 0,
       Handle<Object> script_name = Handle<Object>(),
       ScriptOriginOptions options = ScriptOriginOptions());
 
@@ -118,26 +120,10 @@
       JavaScriptFrame* osr_frame);
 };
 
-struct InlinedFunctionInfo {
-  InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
-                      int script_id, int start_position)
-      : parent_id(parent_id),
-        inline_position(inline_position),
-        script_id(script_id),
-        start_position(start_position) {}
-  int parent_id;
-  SourcePosition inline_position;
-  int script_id;
-  int start_position;
-  std::vector<size_t> deopt_pc_offsets;
-
-  static const int kNoParentId = -1;
-};
-
 
 // CompilationInfo encapsulates some information known at compile time.  It
 // is constructed based on the resources available at compile-time.
-class CompilationInfo {
+class CompilationInfo final {
  public:
   // Various configuration flags for a compilation, as well as some properties
   // of the compiled code produced by a compilation.
@@ -154,19 +140,18 @@
     kFrameSpecializing = 1 << 9,
     kNativeContextSpecializing = 1 << 10,
     kInliningEnabled = 1 << 11,
-    kTypingEnabled = 1 << 12,
-    kDisableFutureOptimization = 1 << 13,
-    kSplittingEnabled = 1 << 14,
-    kDeoptimizationEnabled = 1 << 16,
-    kSourcePositionsEnabled = 1 << 17,
-    kFirstCompile = 1 << 18,
-    kBailoutOnUninitialized = 1 << 19,
+    kDisableFutureOptimization = 1 << 12,
+    kSplittingEnabled = 1 << 13,
+    kDeoptimizationEnabled = 1 << 14,
+    kSourcePositionsEnabled = 1 << 15,
+    kBailoutOnUninitialized = 1 << 16,
+    kOptimizeFromBytecode = 1 << 17,
   };
 
-  explicit CompilationInfo(ParseInfo* parse_info);
-  CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone,
+  CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
+  CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
                   Code::Flags code_flags = Code::ComputeFlags(Code::STUB));
-  virtual ~CompilationInfo();
+  ~CompilationInfo();
 
   ParseInfo* parse_info() const { return parse_info_; }
 
@@ -174,19 +159,11 @@
   // TODO(titzer): inline and delete accessors of ParseInfo
   // -----------------------------------------------------------
   Handle<Script> script() const;
-  bool is_eval() const;
-  bool is_native() const;
-  bool is_module() const;
-  LanguageMode language_mode() const;
-  Handle<JSFunction> closure() const;
   FunctionLiteral* literal() const;
   Scope* scope() const;
   Handle<Context> context() const;
   Handle<SharedFunctionInfo> shared_info() const;
   bool has_shared_info() const;
-  bool has_context() const;
-  bool has_literal() const;
-  bool has_scope() const;
   // -----------------------------------------------------------
 
   Isolate* isolate() const {
@@ -194,14 +171,14 @@
   }
   Zone* zone() { return zone_; }
   bool is_osr() const { return !osr_ast_id_.IsNone(); }
+  Handle<JSFunction> closure() const { return closure_; }
   Handle<Code> code() const { return code_; }
   Code::Flags code_flags() const { return code_flags_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
-  int opt_count() const { return opt_count_; }
+  JavaScriptFrame* osr_frame() const { return osr_frame_; }
   int num_parameters() const;
   int num_parameters_including_this() const;
   bool is_this_defined() const;
-  int num_heap_slots() const;
 
   void set_parameter_count(int parameter_count) {
     DCHECK(IsStub());
@@ -211,11 +188,6 @@
   bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
   Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
 
-  Handle<AbstractCode> abstract_code() const {
-    return has_bytecode_array() ? Handle<AbstractCode>::cast(bytecode_array())
-                                : Handle<AbstractCode>::cast(code());
-  }
-
   bool is_tracking_positions() const { return track_positions_; }
 
   bool is_calling() const {
@@ -294,26 +266,22 @@
 
   bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
 
-  void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
-
-  bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
-
   void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
 
   bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
 
-  void MarkAsFirstCompile() { SetFlag(kFirstCompile); }
-
-  void MarkAsCompiled() { SetFlag(kFirstCompile, false); }
-
-  bool is_first_compile() const { return GetFlag(kFirstCompile); }
-
   void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
 
   bool is_bailout_on_uninitialized() const {
     return GetFlag(kBailoutOnUninitialized);
   }
 
+  void MarkAsOptimizeFromBytecode() { SetFlag(kOptimizeFromBytecode); }
+
+  bool is_optimizing_from_bytecode() const {
+    return GetFlag(kOptimizeFromBytecode);
+  }
+
   bool GeneratePreagedPrologue() const {
     // Generate a pre-aged prologue if we are optimizing for size, which
     // will make code flushing more aggressive. Only apply to Code::FUNCTION,
@@ -357,9 +325,10 @@
     code_flags_ =
         Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
   }
-  void SetOptimizingForOsr(BailoutId osr_ast_id) {
+  void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
     SetOptimizing();
     osr_ast_id_ = osr_ast_id;
+    osr_frame_ = osr_frame;
   }
 
   // Deoptimization support.
@@ -383,7 +352,7 @@
   }
 
   void ReopenHandlesInNewHandleScope() {
-    // Empty for now but will be needed once fields move from ParseInfo.
+    closure_ = Handle<JSFunction>(*closure_);
   }
 
   void AbortOptimization(BailoutReason reason) {
@@ -410,23 +379,8 @@
     prologue_offset_ = prologue_offset;
   }
 
-  int start_position_for(uint32_t inlining_id) {
-    return inlined_function_infos_.at(inlining_id).start_position;
-  }
-  const std::vector<InlinedFunctionInfo>& inlined_function_infos() {
-    return inlined_function_infos_;
-  }
-
-  void LogDeoptCallPosition(int pc_offset, int inlining_id);
-  int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                           SourcePosition position, int parent_id);
-
   CompilationDependencies* dependencies() { return &dependencies_; }
 
-  bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
-    return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure());
-  }
-
   int optimization_id() const { return optimization_id_; }
 
   int osr_expr_stack_height() { return osr_expr_stack_height_; }
@@ -434,8 +388,6 @@
     DCHECK(height >= 0);
     osr_expr_stack_height_ = height;
   }
-  JavaScriptFrame* osr_frame() const { return osr_frame_; }
-  void set_osr_frame(JavaScriptFrame* osr_frame) { osr_frame_ = osr_frame; }
 
 #if DEBUG
   void PrintAstForTesting();
@@ -474,6 +426,8 @@
 
   StackFrame::Type GetOutputStackFrameType() const;
 
+  int GetDeclareGlobalsFlags() const;
+
  protected:
   ParseInfo* parse_info_;
 
@@ -505,7 +459,7 @@
     STUB
   };
 
-  CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+  CompilationInfo(ParseInfo* parse_info, Vector<const char> debug_name,
                   Code::Flags code_flags, Mode mode, Isolate* isolate,
                   Zone* zone);
 
@@ -527,6 +481,8 @@
 
   Code::Flags code_flags_;
 
+  Handle<JSFunction> closure_;
+
   // The compiled code.
   Handle<Code> code_;
 
@@ -552,15 +508,10 @@
 
   int prologue_offset_;
 
-  std::vector<InlinedFunctionInfo> inlined_function_infos_;
   bool track_positions_;
 
   InlinedFunctionList inlined_functions_;
 
-  // A copy of shared_info()->opt_count() to avoid handle deref
-  // during graph optimization.
-  int opt_count_;
-
   // Number of parameters used for compilation of stubs that require arguments.
   int parameter_count_;
 
@@ -571,29 +522,30 @@
   // The current OSR frame for specialization or {nullptr}.
   JavaScriptFrame* osr_frame_ = nullptr;
 
-  const char* debug_name_;
+  Vector<const char> debug_name_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
 
-
-class HGraph;
-class LChunk;
-
-// A helper class that calls the three compilation phases in
-// Crankshaft and keeps track of its state.  The three phases
-// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
-// fail, bail-out to the full code generator or succeed.  Apart from
-// their return value, the status of the phase last run can be checked
-// using last_status().
-class OptimizedCompileJob: public ZoneObject {
+// A base class for compilation jobs intended to run concurrent to the main
+// thread. The job is split into three phases which are called in sequence on
+// different threads and with different limitations:
+//  1) CreateGraph:   Runs on main thread. No major limitations.
+//  2) OptimizeGraph: Runs concurrently. No heap allocation or handle derefs.
+//  3) GenerateCode:  Runs on main thread. No dependency changes.
+//
+// Each of the three phases can either fail or succeed. Apart from their return
+// value, the status of the phase last run can be checked using {last_status()}
+// as well. When failing we distinguish between the following levels:
+//  a) AbortOptimization: Persistent failure, disable future optimization.
+//  b) RetryOptimization: Transient failure, try again next time.
+class CompilationJob {
  public:
-  explicit OptimizedCompileJob(CompilationInfo* info)
-      : info_(info), graph_(NULL), chunk_(NULL), last_status_(FAILED) {}
+  explicit CompilationJob(CompilationInfo* info, const char* compiler_name)
+      : info_(info), compiler_name_(compiler_name), last_status_(SUCCEEDED) {}
+  virtual ~CompilationJob() {}
 
-  enum Status {
-    FAILED, BAILED_OUT, SUCCEEDED
-  };
+  enum Status { FAILED, SUCCEEDED };
 
   MUST_USE_RESULT Status CreateGraph();
   MUST_USE_RESULT Status OptimizeGraph();
@@ -605,44 +557,36 @@
 
   Status RetryOptimization(BailoutReason reason) {
     info_->RetryOptimization(reason);
-    return SetLastStatus(BAILED_OUT);
+    return SetLastStatus(FAILED);
   }
 
   Status AbortOptimization(BailoutReason reason) {
     info_->AbortOptimization(reason);
-    return SetLastStatus(BAILED_OUT);
+    return SetLastStatus(FAILED);
   }
 
+  void RecordOptimizationStats();
+
+ protected:
+  void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+
+  // Overridden by the actual implementation.
+  virtual Status CreateGraphImpl() = 0;
+  virtual Status OptimizeGraphImpl() = 0;
+  virtual Status GenerateCodeImpl() = 0;
+
  private:
   CompilationInfo* info_;
-  HGraph* graph_;
-  LChunk* chunk_;
   base::TimeDelta time_taken_to_create_graph_;
   base::TimeDelta time_taken_to_optimize_;
   base::TimeDelta time_taken_to_codegen_;
+  const char* compiler_name_;
   Status last_status_;
 
   MUST_USE_RESULT Status SetLastStatus(Status status) {
     last_status_ = status;
     return last_status_;
   }
-  void RecordOptimizationStats();
-
-  struct Timer {
-    Timer(OptimizedCompileJob* job, base::TimeDelta* location)
-        : job_(job), location_(location) {
-      DCHECK(location_ != NULL);
-      timer_.Start();
-    }
-
-    ~Timer() {
-      *location_ += timer_.Elapsed();
-    }
-
-    OptimizedCompileJob* job_;
-    base::ElapsedTimer timer_;
-    base::TimeDelta* location_;
-  };
 };
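
All three phase methods are MUST_USE_RESULT, so a driver has to check each
status before moving on. A minimal sketch of sequencing the phases, using only
the interface declared above; job creation, queuing, and the actual thread
hand-off are elided:

    // OptimizeGraph is the only phase that may run off the main thread; per
    // the class comment it must not allocate on the heap or deref handles.
    void RunJobToCompletion(CompilationJob* job) {
      if (job->CreateGraph() != CompilationJob::SUCCEEDED) return;    // main thread
      if (job->OptimizeGraph() != CompilationJob::SUCCEEDED) return;  // background
      if (job->GenerateCode() != CompilationJob::SUCCEEDED) return;   // main thread
      job->RecordOptimizationStats();
    }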
 
 }  // namespace internal
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 722bbf0..d4187fa 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -16,36 +16,39 @@
 
 // static
 FieldAccess AccessBuilder::ForMap() {
-  FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
-                        MaybeHandle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase, HeapObject::kMapOffset,   MaybeHandle<Name>(),
+      Type::Any(), MachineType::AnyTagged(), kMapWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForHeapNumberValue() {
-  FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
-                        MaybeHandle<Name>(), TypeCache().Get().kFloat64,
-                        MachineType::Float64()};
+  FieldAccess access = {kTaggedBase,
+                        HeapNumber::kValueOffset,
+                        MaybeHandle<Name>(),
+                        TypeCache::Get().kFloat64,
+                        MachineType::Float64(),
+                        kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectProperties() {
-  FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
-                        MaybeHandle<Name>(), Type::Internal(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase,      JSObject::kPropertiesOffset, MaybeHandle<Name>(),
+      Type::Internal(), MachineType::AnyTagged(),    kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectElements() {
-  FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
-                        MaybeHandle<Name>(), Type::Internal(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase,      JSObject::kElementsOffset, MaybeHandle<Name>(),
+      Type::Internal(), MachineType::AnyTagged(),  kPointerWriteBarrier};
   return access;
 }
 
@@ -54,39 +57,93 @@
 FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
                                                        int index) {
   int const offset = map->GetInObjectPropertyOffset(index);
-  FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(),
-                        Type::Tagged(), MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        offset,
+                        MaybeHandle<Name>(),
+                        Type::Tagged(),
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
+FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
+  FieldAccess access = {kTaggedBase,
+                        JSFunction::kPrototypeOrInitialMapOffset,
+                        MaybeHandle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForJSFunctionContext() {
-  FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
-                        MaybeHandle<Name>(), Type::Internal(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase,      JSFunction::kContextOffset, MaybeHandle<Name>(),
+      Type::Internal(), MachineType::AnyTagged(),   kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
-  FieldAccess access = {kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
-                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        JSFunction::kSharedFunctionInfoOffset,
+                        Handle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
+FieldAccess AccessBuilder::ForJSFunctionLiterals() {
+  FieldAccess access = {
+      kTaggedBase,      JSFunction::kLiteralsOffset, Handle<Name>(),
+      Type::Internal(), MachineType::AnyTagged(),    kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
+  FieldAccess access = {kTaggedBase,
+                        JSFunction::kCodeEntryOffset,
+                        Handle<Name>(),
+                        Type::UntaggedPointer(),
+                        MachineType::Pointer(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
+  FieldAccess access = {kTaggedBase,
+                        JSFunction::kNextFunctionLinkOffset,
+                        Handle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
 FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
   TypeCache const& type_cache = TypeCache::Get();
-  FieldAccess access = {kTaggedBase, JSArray::kLengthOffset, Handle<Name>(),
+  FieldAccess access = {kTaggedBase,
+                        JSArray::kLengthOffset,
+                        Handle<Name>(),
                         type_cache.kJSArrayLengthType,
-                        MachineType::AnyTagged()};
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   if (IsFastDoubleElementsKind(elements_kind)) {
     access.type = type_cache.kFixedDoubleArrayLengthType;
+    access.write_barrier_kind = kNoWriteBarrier;
   } else if (IsFastElementsKind(elements_kind)) {
     access.type = type_cache.kFixedArrayLengthType;
+    access.write_barrier_kind = kNoWriteBarrier;
   }
   return access;
 }
@@ -94,190 +151,228 @@
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
-  FieldAccess access = {kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
-                        MaybeHandle<Name>(), Type::UntaggedPointer(),
-                        MachineType::Pointer()};
+  FieldAccess access = {kTaggedBase,
+                        JSArrayBuffer::kBackingStoreOffset,
+                        MaybeHandle<Name>(),
+                        Type::UntaggedPointer(),
+                        MachineType::Pointer(),
+                        kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
-  FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
+  FieldAccess access = {kTaggedBase,         JSArrayBuffer::kBitFieldOffset,
                         MaybeHandle<Name>(), TypeCache::Get().kInt8,
-                        MachineType::Int8()};
+                        MachineType::Int8(), kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
-  FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
-                        MaybeHandle<Name>(), Type::TaggedPointer(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        JSArrayBufferView::kBufferOffset,
+                        MaybeHandle<Name>(),
+                        Type::TaggedPointer(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
-  FieldAccess access = {
-      kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
-      MaybeHandle<Name>(), Type::Number(), MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        JSDate::kValueOffset + index * kPointerSize,
+                        MaybeHandle<Name>(),
+                        Type::Number(),
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultDone() {
-  FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
-                        MaybeHandle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase, JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
+      Type::Any(), MachineType::AnyTagged(),      kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultValue() {
-  FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
-                        MaybeHandle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase, JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
+      Type::Any(), MachineType::AnyTagged(),       kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSRegExpFlags() {
-  FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
-                        MaybeHandle<Name>(), Type::Tagged(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase,    JSRegExp::kFlagsOffset,   MaybeHandle<Name>(),
+      Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSRegExpSource() {
-  FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
-                        MaybeHandle<Name>(), Type::Tagged(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase,    JSRegExp::kSourceOffset,  MaybeHandle<Name>(),
+      Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForFixedArrayLength() {
-  FieldAccess access = {
-      kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
-      TypeCache::Get().kFixedArrayLengthType, MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        FixedArray::kLengthOffset,
+                        MaybeHandle<Name>(),
+                        TypeCache::Get().kFixedArrayLengthType,
+                        MachineType::AnyTagged(),
+                        kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
-  FieldAccess access = {kTaggedBase, DescriptorArray::kEnumCacheOffset,
-                        Handle<Name>(), Type::TaggedPointer(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        DescriptorArray::kEnumCacheOffset,
+                        Handle<Name>(),
+                        Type::TaggedPointer(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
-  FieldAccess access = {
-      kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset, Handle<Name>(),
-      Type::TaggedPointer(), MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        DescriptorArray::kEnumCacheBridgeCacheOffset,
+                        Handle<Name>(),
+                        Type::TaggedPointer(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapBitField() {
-  FieldAccess access = {kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
-                        TypeCache::Get().kUint8, MachineType::Uint8()};
+  FieldAccess access = {kTaggedBase,          Map::kBitFieldOffset,
+                        Handle<Name>(),       TypeCache::Get().kUint8,
+                        MachineType::Uint8(), kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapBitField3() {
-  FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
-                        TypeCache::Get().kInt32, MachineType::Int32()};
+  FieldAccess access = {kTaggedBase,          Map::kBitField3Offset,
+                        Handle<Name>(),       TypeCache::Get().kInt32,
+                        MachineType::Int32(), kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapDescriptors() {
-  FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
-                        Type::TaggedPointer(), MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase,           Map::kDescriptorsOffset,  Handle<Name>(),
+      Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapInstanceType() {
-  FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
-                        TypeCache::Get().kUint8, MachineType::Uint8()};
+  FieldAccess access = {kTaggedBase,          Map::kInstanceTypeOffset,
+                        Handle<Name>(),       TypeCache::Get().kUint8,
+                        MachineType::Uint8(), kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapPrototype() {
-  FieldAccess access = {kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
-                        Type::TaggedPointer(), MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase,           Map::kPrototypeOffset,    Handle<Name>(),
+      Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForStringLength() {
-  FieldAccess access = {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+  FieldAccess access = {kTaggedBase,
+                        String::kLengthOffset,
+                        Handle<Name>(),
                         TypeCache::Get().kStringLengthType,
-                        MachineType::AnyTagged()};
+                        MachineType::AnyTagged(),
+                        kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
-  FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
-                        Handle<Name>(), Type::Receiver(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        JSGlobalObject::kGlobalProxyOffset,
+                        Handle<Name>(),
+                        Type::Receiver(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
-  FieldAccess access = {kTaggedBase, JSGlobalObject::kNativeContextOffset,
-                        Handle<Name>(), Type::Internal(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        JSGlobalObject::kNativeContextOffset,
+                        Handle<Name>(),
+                        Type::Internal(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForValue() {
-  FieldAccess access = {kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
-                        Type::Any(), MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase, JSValue::kValueOffset,    Handle<Name>(),
+      Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForArgumentsLength() {
-  FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
-                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase, JSArgumentsObject::kLengthOffset, Handle<Name>(),
+      Type::Any(), MachineType::AnyTagged(),         kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForArgumentsCallee() {
-  FieldAccess access = {kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
-                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        JSSloppyArgumentsObject::kCalleeOffset,
+                        Handle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -285,8 +380,12 @@
 // static
 FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
   int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
-  FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        offset,
+                        Handle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -296,8 +395,12 @@
   int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
   DCHECK_EQ(offset,
             Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
-  FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        offset,
+                        Handle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -310,16 +413,21 @@
 
 // static
 FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
-  FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
-                        type, MachineType::AnyTagged()};
+  FieldAccess access = {
+      kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
+      type,        MachineType::AnyTagged(),   kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
-  FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
-                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase,
+                        SharedFunctionInfo::kFeedbackVectorOffset,
+                        Handle<Name>(),
+                        Type::Any(),
+                        MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -327,7 +435,7 @@
 // static
 ElementAccess AccessBuilder::ForFixedArrayElement() {
   ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
-                          MachineType::AnyTagged()};
+                          MachineType::AnyTagged(), kFullWriteBarrier};
   return access;
 }
 
@@ -335,7 +443,8 @@
 // static
 ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
   ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
-                          TypeCache::Get().kFloat64, MachineType::Float64()};
+                          TypeCache::Get().kFloat64, MachineType::Float64(),
+                          kNoWriteBarrier};
   return access;
 }
 
@@ -348,56 +457,49 @@
   switch (type) {
     case kExternalInt8Array: {
       ElementAccess access = {taggedness, header_size, Type::Signed32(),
-                              MachineType::Int8()};
+                              MachineType::Int8(), kNoWriteBarrier};
       return access;
     }
     case kExternalUint8Array:
     case kExternalUint8ClampedArray: {
       ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
-                              MachineType::Uint8()};
+                              MachineType::Uint8(), kNoWriteBarrier};
       return access;
     }
     case kExternalInt16Array: {
       ElementAccess access = {taggedness, header_size, Type::Signed32(),
-                              MachineType::Int16()};
+                              MachineType::Int16(), kNoWriteBarrier};
       return access;
     }
     case kExternalUint16Array: {
       ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
-                              MachineType::Uint16()};
+                              MachineType::Uint16(), kNoWriteBarrier};
       return access;
     }
     case kExternalInt32Array: {
       ElementAccess access = {taggedness, header_size, Type::Signed32(),
-                              MachineType::Int32()};
+                              MachineType::Int32(), kNoWriteBarrier};
       return access;
     }
     case kExternalUint32Array: {
       ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
-                              MachineType::Uint32()};
+                              MachineType::Uint32(), kNoWriteBarrier};
       return access;
     }
     case kExternalFloat32Array: {
       ElementAccess access = {taggedness, header_size, Type::Number(),
-                              MachineType::Float32()};
+                              MachineType::Float32(), kNoWriteBarrier};
       return access;
     }
     case kExternalFloat64Array: {
       ElementAccess access = {taggedness, header_size, Type::Number(),
-                              MachineType::Float64()};
+                              MachineType::Float64(), kNoWriteBarrier};
       return access;
     }
   }
   UNREACHABLE();
-  ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None()};
-  return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForStatsCounter() {
-  FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
-                        TypeCache::Get().kInt32, MachineType::Int32()};
+  ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None(),
+                          kNoWriteBarrier};
   return access;
 }
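
Every FieldAccess and ElementAccess initializer in this file now carries a
WriteBarrierKind as its final member, letting store lowering emit the cheapest
barrier that is still sound for the value being written. A sketch of the kinds
as they are used above (the real enum is defined alongside the access
descriptors and may include further members):

    enum WriteBarrierKind {
      kNoWriteBarrier,       // untagged payloads (floats, raw ints) and
                             // Smi-typed fields such as fixed-array lengths
      kMapWriteBarrier,      // the stored value is known to be a Map
      kPointerWriteBarrier,  // the stored value is known to be a heap pointer
      kFullWriteBarrier      // the stored value may be any tagged value
    };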
 
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index 8375d37..b36277e 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -34,12 +34,24 @@
   // Provides access to JSObject inobject property fields.
   static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
 
+  // Provides access to JSFunction::prototype_or_initial_map() field.
+  static FieldAccess ForJSFunctionPrototypeOrInitialMap();
+
   // Provides access to JSFunction::context() field.
   static FieldAccess ForJSFunctionContext();
 
   // Provides access to JSFunction::shared() field.
   static FieldAccess ForJSFunctionSharedFunctionInfo();
 
+  // Provides access to JSFunction::literals() field.
+  static FieldAccess ForJSFunctionLiterals();
+
+  // Provides access to JSFunction::code() field.
+  static FieldAccess ForJSFunctionCodeEntry();
+
+  // Provides access to JSFunction::next_function_link() field.
+  static FieldAccess ForJSFunctionNextFunctionLink();
+
   // Provides access to JSArray::length() field.
   static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
 
@@ -130,12 +142,6 @@
   static ElementAccess ForTypedArrayElement(ExternalArrayType type,
                                             bool is_external);
 
-  // ===========================================================================
-  // Access to global per-isolate variables (based on external reference).
-
-  // Provides access to the backing store of a StatsCounter.
-  static FieldAccess ForStatsCounter();
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
 };
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 4a2a857..e38f629 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -192,12 +192,12 @@
   MapTransitionList transitions(maps.length());
   for (Handle<Map> map : maps) {
     if (Map::TryUpdate(map).ToHandle(&map)) {
-      Handle<Map> transition_target =
-          Map::FindTransitionedMap(map, &possible_transition_targets);
-      if (transition_target.is_null()) {
+      Map* transition_target =
+          map->FindElementsKindTransitionedMap(&possible_transition_targets);
+      if (transition_target == nullptr) {
         receiver_maps.Add(map);
       } else {
-        transitions.push_back(std::make_pair(map, transition_target));
+        transitions.push_back(std::make_pair(map, handle(transition_target)));
       }
     }
   }
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index a0b5022..2c9415e 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -149,7 +149,7 @@
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
   }
 
@@ -218,7 +218,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
                        Register value, Register scratch0, Register scratch1,
@@ -388,12 +389,25 @@
     DCHECK_EQ(LeaveCC, i.OutputSBit());           \
   } while (0)
 
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)                       \
+  do {                                                                \
+    __ asm_instr(i.OutputRegister(),                                  \
+                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+    __ dmb(ISH);                                                      \
+  } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                      \
+  do {                                                                \
+    __ dmb(ISH);                                                      \
+    __ asm_instr(i.InputRegister(2),                                  \
+                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+    __ dmb(ISH);                                                      \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ LeaveFrame(StackFrame::MANUAL);
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {}
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -445,7 +459,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   ArmOperandConverter i(this, instr);
 
   __ MaybeCheckConstPool();
@@ -488,6 +503,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!instr->InputAt(0)->IsImmediate());
+      __ Jump(i.InputRegister(0));
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -571,7 +594,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -856,7 +881,7 @@
       }
       break;
     case kArmVcmpF32:
-      if (instr->InputAt(1)->IsDoubleRegister()) {
+      if (instr->InputAt(1)->IsFPRegister()) {
         __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
                                  i.InputFloat32Register(1));
       } else {
@@ -907,7 +932,7 @@
       __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArmVcmpF64:
-      if (instr->InputAt(1)->IsDoubleRegister()) {
+      if (instr->InputAt(1)->IsFPRegister()) {
         __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
                                  i.InputFloat64Register(1));
       } else {
@@ -1146,8 +1171,48 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmFloat32Max: {
+      CpuFeatureScope scope(masm(), ARMv8);
+      // (b < a) ? a : b
+      SwVfpRegister a = i.InputFloat32Register(0);
+      SwVfpRegister b = i.InputFloat32Register(1);
+      SwVfpRegister result = i.OutputFloat32Register(0);
+      __ VFPCompareAndSetFlags(a, b);
+      __ vsel(gt, result, a, b);
+      break;
+    }
+    case kArmFloat32Min: {
+      CpuFeatureScope scope(masm(), ARMv8);
+      // (a < b) ? a : b
+      SwVfpRegister a = i.InputFloat32Register(0);
+      SwVfpRegister b = i.InputFloat32Register(1);
+      SwVfpRegister result = i.OutputFloat32Register(0);
+      __ VFPCompareAndSetFlags(b, a);
+      __ vsel(gt, result, a, b);
+      break;
+    }
+    case kArmFloat64Max: {
+      CpuFeatureScope scope(masm(), ARMv8);
+      // (b < a) ? a : b
+      DwVfpRegister a = i.InputFloat64Register(0);
+      DwVfpRegister b = i.InputFloat64Register(1);
+      DwVfpRegister result = i.OutputFloat64Register(0);
+      __ VFPCompareAndSetFlags(a, b);
+      __ vsel(gt, result, a, b);
+      break;
+    }
+    case kArmFloat64Min: {
+      CpuFeatureScope scope(masm(), ARMv8);
+      // (a < b) ? a : b
+      DwVfpRegister a = i.InputFloat64Register(0);
+      DwVfpRegister b = i.InputFloat64Register(1);
+      DwVfpRegister result = i.OutputFloat64Register(0);
+      __ VFPCompareAndSetFlags(b, a);
+      __ vsel(gt, result, a, b);
+      break;
+    }
     case kArmPush:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ vpush(i.InputDoubleRegister(0));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
       } else {
@@ -1202,7 +1267,34 @@
     case kCheckedStoreWord64:
       UNREACHABLE();  // currently unsupported checked int64 load/store.
       break;
+
+    case kAtomicLoadInt8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
+      break;
+    case kAtomicLoadUint8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
+      break;
+    case kAtomicLoadInt16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
+      break;
+    case kAtomicLoadUint16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
+      break;
+    case kAtomicLoadWord32:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
+      break;
+
+    case kAtomicStoreWord8:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+      break;
+    case kAtomicStoreWord16:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+      break;
+    case kAtomicStoreWord32:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+      break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
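
The new kAtomicLoad*/kAtomicStore* cases implement sequentially consistent accesses on ARM with full data memory barriers: a load is a plain ldr-class instruction followed by dmb ish, and a store is bracketed by dmb ish on both sides (the arm64 backend mirrors this with Dmb(InnerShareable, BarrierAll)). This is the standard seq-cst mapping for ARMv7. A portable illustration of the same guarantee — my addition, not V8 code:

    // What the ASSEMBLE_ATOMIC_* macros provide, expressed with std::atomic;
    // on ARMv7 a compiler emits the same ldr/str + dmb ish sequences.
    #include <atomic>
    #include <cstdint>

    std::atomic<int32_t> cell{0};

    int32_t AtomicLoadSeqCst() {
      return cell.load(std::memory_order_seq_cst);   // ldr; dmb ish
    }

    void AtomicStoreSeqCst(int32_t v) {
      cell.store(v, std::memory_order_seq_cst);      // dmb ish; str; dmb ish
    }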
 
 
@@ -1263,20 +1355,47 @@
   }
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   __ CheckConstPool(false, false);
+  return kSuccess;
 }
 
+void CodeGenerator::FinishFrame(Frame* frame) {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
-void CodeGenerator::AssemblePrologue() {
+  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+  if (saves_fp != 0) {
+    frame->AlignSavedCalleeRegisterSlots();
+  }
+
+  if (saves_fp != 0) {
+    // Save callee-saved FP registers.
+    STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+    uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+    uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+    DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+    frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
+                                            (kDoubleSize / kPointerSize));
+  }
+  const RegList saves = FLAG_enable_embedded_constant_pool
+                            ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+                            : descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    // Save callee-saved registers.
+    frame->AllocateSavedCalleeRegisterSlots(
+        base::bits::CountPopulation32(saves));
+  }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsCFunctionCall()) {
@@ -1295,7 +1414,8 @@
     }
   }
 
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots = frame()->GetSpillSlotCount();
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1306,15 +1426,12 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
   const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
-  if (saves_fp != 0) {
-    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
-  }
-  if (stack_shrink_slots > 0) {
-    __ sub(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+  if (shrink_slots > 0) {
+    __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
   }
 
   if (saves_fp != 0) {
@@ -1325,8 +1442,6 @@
     DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
     __ vstm(db_w, sp, DwVfpRegister::from_code(first),
             DwVfpRegister::from_code(last));
-    frame()->AllocateSavedCalleeRegisterSlots((last - first + 1) *
-                                              (kDoubleSize / kPointerSize));
   }
   const RegList saves = FLAG_enable_embedded_constant_pool
                             ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
@@ -1334,8 +1449,6 @@
   if (saves != 0) {
     // Save callee-saved registers.
     __ stm(db_w, sp, saves);
-    frame()->AllocateSavedCalleeRegisterSlots(
-        base::bits::CountPopulation32(saves));
   }
 }
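
Frame construction is now split in two phases: FinishFrame only records layout — alignment plus the slots reserved for callee-saved core and FP registers — while AssembleConstructFrame emits the actual vstm/stm pushes against that already-final layout. Previously AllocateSavedCalleeRegisterSlots ran during prologue emission, after parts of the frame had already been laid down. A comment-level sketch of the contract (the driver ordering is my inference from the code, not quoted from V8):

    // Assumed two-phase contract:
    //   FinishFrame(frame);          // layout fixed; no instructions emitted
    //   AssembleConstructFrame();    // prologue emitted; spill-slot count
    //                                // and callee-save slots already final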
 
@@ -1408,7 +1521,12 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          __ mov(dst, Operand(src.ToInt32()));
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+          } else {
+            __ mov(dst, Operand(src.ToInt32()));
+          }
           break;
         case Constant::kInt64:
           UNREACHABLE();
@@ -1443,7 +1561,7 @@
       }
       if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
     } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         MemOperand dst = g.ToMemOperand(destination);
         __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
         __ str(ip, dst);
@@ -1453,27 +1571,27 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
-      DwVfpRegister dst = destination->IsDoubleRegister()
+      DwVfpRegister dst = destination->IsFPRegister()
                               ? g.ToFloat64Register(destination)
                               : kScratchDoubleReg;
       __ vmov(dst, src.ToFloat64(), kScratchReg);
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         __ vstr(dst, g.ToMemOperand(destination));
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     DwVfpRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       DwVfpRegister dst = g.ToDoubleRegister(destination);
       __ Move(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       __ vstr(src, g.ToMemOperand(destination));
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       __ vldr(g.ToDoubleRegister(destination), src);
     } else {
       DwVfpRegister temp = kScratchDoubleReg;
@@ -1517,23 +1635,23 @@
     __ vldr(temp_1, dst);
     __ str(temp_0, dst);
     __ vstr(temp_1, src);
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     DwVfpRegister temp = kScratchDoubleReg;
     DwVfpRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       DwVfpRegister dst = g.ToDoubleRegister(destination);
       __ Move(temp, src);
       __ Move(src, dst);
       __ Move(dst, temp);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
       __ Move(temp, src);
       __ vldr(src, dst);
       __ vstr(temp, dst);
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
     DwVfpRegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
@@ -1559,11 +1677,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() {
-  // On 32-bit ARM we do not insert nops for inlined Smi code.
-}
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 5e6f5c9..fc371e0 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -101,6 +101,10 @@
   V(ArmVstrF32)                    \
   V(ArmVldrF64)                    \
   V(ArmVstrF64)                    \
+  V(ArmFloat32Max)                 \
+  V(ArmFloat32Min)                 \
+  V(ArmFloat64Max)                 \
+  V(ArmFloat64Min)                 \
   V(ArmLdrb)                       \
   V(ArmLdrsb)                      \
   V(ArmStrb)                       \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index 466765e..ec28b72 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -99,6 +99,10 @@
     case kArmVmovHighU32F64:
     case kArmVmovHighF64U32:
     case kArmVmovF64U32U32:
+    case kArmFloat64Max:
+    case kArmFloat64Min:
+    case kArmFloat32Max:
+    case kArmFloat32Min:
       return kNoOpcodeFlags;
 
     case kArmVldrF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 76d9e3c..b2b1a70 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -1142,15 +1142,12 @@
   VisitRR(this, kArmVcvtF32F64, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, kArchTruncateDoubleToI, node);
+}
 
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, kArchTruncateDoubleToI, node);
-    case TruncationMode::kRoundToZero:
-      return VisitRR(this, kArmVcvtS32F64, node);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRR(this, kArmVcvtS32F64, node);
 }
 
 
@@ -1208,6 +1205,35 @@
   VisitRRR(this, kArmVaddF64, node);
 }
 
+namespace {
+void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) {
+  ArmOperandGenerator g(selector);
+  Float32BinopMatcher m(node);
+  if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
+    Float32BinopMatcher mright(m.right().node());
+    selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
+                   g.UseRegister(m.left().node()),
+                   g.UseRegister(mright.left().node()),
+                   g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRR(selector, kArmVsubF32, node);
+}
+
+void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) {
+  ArmOperandGenerator g(selector);
+  Float64BinopMatcher m(node);
+  if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
+    Float64BinopMatcher mright(m.right().node());
+    selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
+                   g.UseRegister(m.left().node()),
+                   g.UseRegister(mright.left().node()),
+                   g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRR(selector, kArmVsubF64, node);
+}
+}  // namespace
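
These helpers centralize a peephole both Sub visitors share: when the right operand of a floating-point subtraction is a multiply with no other uses (CanCover), the pair is combined into a single multiply-subtract instruction, vmls, which updates its accumulator in place (hence DefineSameAsFirst). VFP vmls is a chained multiply-then-subtract with intermediate rounding, so the combined form computes exactly what the separate nodes did. In scalar form — illustration, not V8 code:

    // a - (b * c)  ==>  vmls acc, b, c  with acc preloaded with a.
    float MultiplySubtract(float a, float b, float c) {
      float acc = a;
      acc -= b * c;  // same rounding as (unfused) vmls: round b*c, subtract
      return acc;
    }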
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   ArmOperandGenerator g(this);
@@ -1217,16 +1243,12 @@
          g.UseRegister(m.right().node()));
     return;
   }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    Float32BinopMatcher mright(m.right().node());
-    Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()));
-    return;
-  }
-  VisitRRR(this, kArmVsubF32, node);
+  VisitFloat32SubHelper(this, node);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitFloat32SubHelper(this, node);
+}
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   ArmOperandGenerator g(this);
@@ -1248,16 +1270,12 @@
          g.UseRegister(m.right().node()));
     return;
   }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    Float64BinopMatcher mright(m.right().node());
-    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()));
-    return;
-  }
-  VisitRRR(this, kArmVsubF64, node);
+  VisitFloat64SubHelper(this, node);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitFloat64SubHelper(this, node);
+}
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kArmVmulF32, node);
@@ -1285,18 +1303,25 @@
        g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
 }
 
+void InstructionSelector::VisitFloat32Max(Node* node) {
+  DCHECK(IsSupported(ARMv8));
+  VisitRRR(this, kArmFloat32Max, node);
+}
 
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+  DCHECK(IsSupported(ARMv8));
+  VisitRRR(this, kArmFloat64Max, node);
+}
 
+void InstructionSelector::VisitFloat32Min(Node* node) {
+  DCHECK(IsSupported(ARMv8));
+  VisitRRR(this, kArmFloat32Min, node);
+}
 
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
-
+void InstructionSelector::VisitFloat64Min(Node* node) {
+  DCHECK(IsSupported(ARMv8));
+  VisitRRR(this, kArmFloat64Min, node);
+}
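
Float32/Float64 Min and Max stop being UNREACHABLE and are lowered to a compare plus vsel, which is why each visitor asserts ARMv8: vsel does not exist in earlier VFP versions, and the MachineOperatorBuilder flags below advertise these operations together with the other ARMv8-only features. The selected semantics are exactly `(b < a) ? a : b` for max — an unordered compare (any NaN input) makes the gt condition false, so vsel picks the second operand. A runnable model of the max case, my sketch rather than V8 code:

    // Models VFPCompareAndSetFlags(a, b) + vsel(gt, result, a, b):
    double ArmFloat64MaxModel(double a, double b) {
      // A NaN operand makes the compare unordered; gt is then false -> b.
      return (a > b) ? a : b;
    }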
 
 void InstructionSelector::VisitFloat32Abs(Node* node) {
   VisitRR(this, kArmVabsF32, node);
@@ -1807,6 +1832,61 @@
        g.UseRegister(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kAtomicStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kAtomicStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicStoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  AddressingMode addressing_mode = kMode_Offset_RR;
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  inputs[input_count++] = g.UseUniqueRegister(index);
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 0, nullptr, input_count, inputs);
+}
 
 // static
 MachineOperatorBuilder::Flags
@@ -1826,7 +1906,11 @@
              MachineOperatorBuilder::kFloat64RoundTruncate |
              MachineOperatorBuilder::kFloat64RoundTiesAway |
              MachineOperatorBuilder::kFloat32RoundTiesEven |
-             MachineOperatorBuilder::kFloat64RoundTiesEven;
+             MachineOperatorBuilder::kFloat64RoundTiesEven |
+             MachineOperatorBuilder::kFloat32Min |
+             MachineOperatorBuilder::kFloat32Max |
+             MachineOperatorBuilder::kFloat64Min |
+             MachineOperatorBuilder::kFloat64Max;
   }
   return flags;
 }
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 456e7e7..0f9fb7c 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -33,6 +33,24 @@
     return InputDoubleRegister(index);
   }
 
+  CPURegister InputFloat32OrZeroRegister(size_t index) {
+    if (instr_->InputAt(index)->IsImmediate()) {
+      DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
+      return wzr;
+    }
+    DCHECK(instr_->InputAt(index)->IsFPRegister());
+    return InputDoubleRegister(index).S();
+  }
+
+  CPURegister InputFloat64OrZeroRegister(size_t index) {
+    if (instr_->InputAt(index)->IsImmediate()) {
+      DCHECK(bit_cast<int64_t>(InputDouble(index)) == 0);
+      return xzr;
+    }
+    DCHECK(instr_->InputAt(index)->IsDoubleRegister());
+    return InputDoubleRegister(index);
+  }
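
InputFloat32OrZeroRegister and InputFloat64OrZeroRegister let a floating-point store of constant zero read wzr/xzr directly instead of first materializing 0.0 in an FP register. The DCHECKs deliberately compare bit patterns, not values: only the all-zero pattern (+0.0) is representable by the zero register, while -0.0 compares numerically equal to 0.0 but has the sign bit set. A runnable illustration of that distinction — my addition, not V8 code:

    #include <cstdint>
    #include <cstring>

    // Only +0.0 may be replaced by wzr/xzr; -0.0 and NaNs must not be.
    bool StorableAsZeroRegister(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);  // portable bit_cast
      return bits == 0;                     // false for -0.0 (sign bit set)
    }

The instruction selector applies the same bit_cast test in UseRegisterOrImmediateZero (see instruction-selector-arm64.cc below) before handing such a constant to these converters.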
+
   size_t OutputCount() { return instr_->OutputCount(); }
 
   DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
@@ -141,7 +159,6 @@
     const size_t index = *first_index;
     switch (AddressingModeField::decode(instr_->opcode())) {
       case kMode_None:
-      case kMode_Operand2_R_LSL_I:
       case kMode_Operand2_R_LSR_I:
       case kMode_Operand2_R_ASR_I:
       case kMode_Operand2_R_ROR_I:
@@ -150,6 +167,10 @@
       case kMode_Operand2_R_SXTB:
       case kMode_Operand2_R_SXTH:
         break;
+      case kMode_Operand2_R_LSL_I:
+        *first_index += 3;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+                          LSL, InputInt32(index + 2));
       case kMode_MRI:
         *first_index += 2;
         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -183,9 +204,18 @@
     Constant constant = ToConstant(operand);
     switch (constant.type()) {
       case Constant::kInt32:
-        return Operand(constant.ToInt32());
+        if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          return Operand(constant.ToInt32(), constant.rmode());
+        } else {
+          return Operand(constant.ToInt32());
+        }
       case Constant::kInt64:
-        return Operand(constant.ToInt64());
+        if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+          return Operand(constant.ToInt64(), constant.rmode());
+        } else {
+          DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+          return Operand(constant.ToInt64());
+        }
       case Constant::kFloat32:
         return Operand(
             isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
@@ -206,7 +236,7 @@
 
   MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
     DCHECK_NOT_NULL(op);
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
   }
 
@@ -412,27 +442,25 @@
     __ Bind(ool->exit());                                    \
   } while (0)
 
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width)          \
-  do {                                               \
-    auto buffer = i.InputRegister(0);                \
-    auto offset = i.InputRegister32(1);              \
-    auto length = i.InputOperand32(2);               \
-    auto value = i.InputFloat##width##Register(3);   \
-    __ Cmp(offset, length);                          \
-    Label done;                                      \
-    __ B(hs, &done);                                 \
-    __ Str(value, MemOperand(buffer, offset, UXTW)); \
-    __ Bind(&done);                                  \
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width)              \
+  do {                                                   \
+    auto buffer = i.InputRegister(0);                    \
+    auto offset = i.InputRegister32(1);                  \
+    auto length = i.InputOperand32(2);                   \
+    auto value = i.InputFloat##width##OrZeroRegister(3); \
+    __ Cmp(offset, length);                              \
+    Label done;                                          \
+    __ B(hs, &done);                                     \
+    __ Str(value, MemOperand(buffer, offset, UXTW));     \
+    __ Bind(&done);                                      \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)          \
   do {                                                     \
     auto buffer = i.InputRegister(0);                      \
     auto offset = i.InputRegister32(1);                    \
     auto length = i.InputOperand32(2);                     \
-    auto value = i.InputRegister32(3);                     \
+    auto value = i.InputOrZeroRegister32(3);               \
     __ Cmp(offset, length);                                \
     Label done;                                            \
     __ B(hs, &done);                                       \
@@ -440,13 +468,12 @@
     __ Bind(&done);                                        \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr)       \
   do {                                                     \
     auto buffer = i.InputRegister(0);                      \
     auto offset = i.InputRegister32(1);                    \
     auto length = i.InputOperand32(2);                     \
-    auto value = i.InputRegister(3);                       \
+    auto value = i.InputOrZeroRegister64(3);               \
     __ Cmp(offset, length);                                \
     Label done;                                            \
     __ B(hs, &done);                                       \
@@ -454,7 +481,6 @@
     __ Bind(&done);                                        \
   } while (0)
 
-
 #define ASSEMBLE_SHIFT(asm_instr, width)                                    \
   do {                                                                      \
     if (instr->InputAt(1)->IsRegister()) {                                  \
@@ -468,6 +494,21 @@
     }                                                                       \
   } while (0)
 
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)                       \
+  do {                                                                \
+    __ asm_instr(i.OutputRegister(),                                  \
+                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+    __ Dmb(InnerShareable, BarrierAll);                               \
+  } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                      \
+  do {                                                                \
+    __ Dmb(InnerShareable, BarrierAll);                               \
+    __ asm_instr(i.InputRegister(2),                                  \
+                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+    __ Dmb(InnerShareable, BarrierAll);                               \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
@@ -526,7 +567,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   Arm64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -577,6 +619,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!instr->InputAt(0)->IsImmediate());
+      __ Jump(i.InputRegister(0));
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -670,7 +720,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -1038,7 +1090,7 @@
       Register prev = __ StackPointer();
       __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
       Operand operand(i.InputInt32(1) * kPointerSize);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Poke(i.InputFloat64Register(0), operand);
       } else {
         __ Poke(i.InputRegister(0), operand);
@@ -1048,7 +1100,7 @@
     }
     case kArm64PokePair: {
       int slot = i.InputInt32(2) - 1;
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
                     slot * kPointerSize);
       } else {
@@ -1088,7 +1140,7 @@
       __ Tst(i.InputRegister32(0), i.InputOperand32(1));
       break;
     case kArm64Float32Cmp:
-      if (instr->InputAt(1)->IsDoubleRegister()) {
+      if (instr->InputAt(1)->IsFPRegister()) {
         __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
       } else {
         DCHECK(instr->InputAt(1)->IsImmediate());
@@ -1132,7 +1184,7 @@
       __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
     case kArm64Float64Cmp:
-      if (instr->InputAt(1)->IsDoubleRegister()) {
+      if (instr->InputAt(1)->IsFPRegister()) {
         __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
       } else {
         DCHECK(instr->InputAt(1)->IsImmediate());
@@ -1315,7 +1367,7 @@
       __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
       break;
     case kArm64Strb:
-      __ Strb(i.InputRegister(2), i.MemoryOperand());
+      __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
     case kArm64Ldrh:
       __ Ldrh(i.OutputRegister(), i.MemoryOperand());
@@ -1324,31 +1376,31 @@
       __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
       break;
     case kArm64Strh:
-      __ Strh(i.InputRegister(2), i.MemoryOperand());
+      __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
     case kArm64LdrW:
       __ Ldr(i.OutputRegister32(), i.MemoryOperand());
       break;
     case kArm64StrW:
-      __ Str(i.InputRegister32(2), i.MemoryOperand());
+      __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
       break;
     case kArm64Ldr:
       __ Ldr(i.OutputRegister(), i.MemoryOperand());
       break;
     case kArm64Str:
-      __ Str(i.InputRegister(2), i.MemoryOperand());
+      __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
     case kArm64LdrS:
       __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
       break;
     case kArm64StrS:
-      __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
+      __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
       break;
     case kArm64LdrD:
       __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
     case kArm64StrD:
-      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+      __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
       break;
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
@@ -1392,7 +1444,37 @@
     case kCheckedStoreFloat64:
       ASSEMBLE_CHECKED_STORE_FLOAT(64);
       break;
+    case kAtomicLoadInt8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsb);
+      break;
+    case kAtomicLoadUint8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrb);
+      break;
+    case kAtomicLoadInt16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsh);
+      break;
+    case kAtomicLoadUint16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrh);
+      break;
+    case kAtomicLoadWord32:
+      __ Ldr(i.OutputRegister32(),
+             MemOperand(i.InputRegister(0), i.InputRegister(1)));
+      __ Dmb(InnerShareable, BarrierAll);
+      break;
+    case kAtomicStoreWord8:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(Strb);
+      break;
+    case kAtomicStoreWord16:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(Strh);
+      break;
+    case kAtomicStoreWord32:
+      __ Dmb(InnerShareable, BarrierAll);
+      __ Str(i.InputRegister32(2),
+             MemOperand(i.InputRegister(0), i.InputRegister(1)));
+      __ Dmb(InnerShareable, BarrierAll);
+      break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
 
 
@@ -1495,30 +1577,49 @@
   __ EndBlockPools();
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {
-  const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+void CodeGenerator::FinishFrame(Frame* frame) {
+  frame->AlignFrame(16);
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
   if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
     __ SetStackPointer(csp);
   } else {
     __ SetStackPointer(jssp);
   }
+
+  // Save FP registers.
+  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+                                   descriptor->CalleeSavedFPRegisters());
+  int saved_count = saves_fp.Count();
+  if (saved_count != 0) {
+    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+    frame->AllocateSavedCalleeRegisterSlots(saved_count *
+                                            (kDoubleSize / kPointerSize));
+  }
+
+  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+                                descriptor->CalleeSavedRegisters());
+  saved_count = saves.Count();
+  if (saved_count != 0) {
+    frame->AllocateSavedCalleeRegisterSlots(saved_count);
+  }
 }
 
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (descriptor->UseNativeStack()) {
     __ AssertCspAligned();
   }
 
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsJSFunctionCall()) {
       DCHECK(!descriptor->UseNativeStack());
@@ -1527,7 +1628,7 @@
       if (descriptor->IsCFunctionCall()) {
         __ Push(lr, fp);
         __ Mov(fp, masm_.StackPointer());
-        __ Claim(stack_shrink_slots);
+        __ Claim(frame()->GetSpillSlotCount());
       } else {
         __ StubPrologue(info()->GetOutputStackFrameType(),
                         frame()->GetTotalFrameSlotCount());
@@ -1535,6 +1636,8 @@
     }
   }
 
+  int shrink_slots = frame()->GetSpillSlotCount();
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1545,11 +1648,11 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
   if (descriptor->IsJSFunctionCall()) {
-    __ Claim(stack_shrink_slots);
+    __ Claim(shrink_slots);
   }
 
   // Save FP registers.
@@ -1559,8 +1662,6 @@
   if (saved_count != 0) {
     DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
     __ PushCPURegList(saves_fp);
-    frame()->AllocateSavedCalleeRegisterSlots(saved_count *
-                                              (kDoubleSize / kPointerSize));
   }
   // Save registers.
   // TODO(palfia): TF save list is not in sync with
@@ -1571,7 +1672,6 @@
   saved_count = saves.Count();
   if (saved_count != 0) {
     __ PushCPURegList(saves);
-    frame()->AllocateSavedCalleeRegisterSlots(saved_count);
   }
 }
 
@@ -1668,11 +1768,11 @@
         __ Str(dst, g.ToMemOperand(destination, masm()));
       }
     } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         FPRegister dst = g.ToDoubleRegister(destination).S();
         __ Fmov(dst, src.ToFloat32());
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         UseScratchRegisterScope scope(masm());
         FPRegister temp = scope.AcquireS();
         __ Fmov(temp, src.ToFloat32());
@@ -1680,30 +1780,30 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         FPRegister dst = g.ToDoubleRegister(destination);
         __ Fmov(dst, src.ToFloat64());
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         UseScratchRegisterScope scope(masm());
         FPRegister temp = scope.AcquireD();
         __ Fmov(temp, src.ToFloat64());
         __ Str(temp, g.ToMemOperand(destination, masm()));
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     FPRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       FPRegister dst = g.ToDoubleRegister(destination);
       __ Fmov(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       __ Str(src, g.ToMemOperand(destination, masm()));
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source, masm());
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       __ Ldr(g.ToDoubleRegister(destination), src);
     } else {
       UseScratchRegisterScope scope(masm());
@@ -1739,7 +1839,7 @@
       __ Ldr(src, dst);
       __ Str(temp, dst);
     }
-  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
     UseScratchRegisterScope scope(masm());
     DoubleRegister temp_0 = scope.AcquireD();
     DoubleRegister temp_1 = scope.AcquireD();
@@ -1749,17 +1849,17 @@
     __ Ldr(temp_1, dst);
     __ Str(temp_0, dst);
     __ Str(temp_1, src);
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     UseScratchRegisterScope scope(masm());
     FPRegister temp = scope.AcquireD();
     FPRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       FPRegister dst = g.ToDoubleRegister(destination);
       __ Fmov(temp, src);
       __ Fmov(src, dst);
       __ Fmov(dst, temp);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination, masm());
       __ Fmov(temp, src);
       __ Ldr(src, dst);
@@ -1778,9 +1878,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
index ca37299..4320d56 100644
--- a/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -176,23 +176,46 @@
   // Basic latency modeling for arm64 instructions. The latencies below were
   // determined empirically.
   switch (instr->arch_opcode()) {
-    case kArm64Float32ToFloat64:
-    case kArm64Float64ToFloat32:
-    case kArm64Float64ToInt32:
-    case kArm64Float64ToUint32:
-    case kArm64Int32ToFloat64:
-    case kArm64Uint32ToFloat64:
-      return 3;
+    case kArm64Add:
+    case kArm64Add32:
+    case kArm64And:
+    case kArm64And32:
+    case kArm64Bic:
+    case kArm64Bic32:
+    case kArm64Cmn:
+    case kArm64Cmn32:
+    case kArm64Cmp:
+    case kArm64Cmp32:
+    case kArm64Eon:
+    case kArm64Eon32:
+    case kArm64Eor:
+    case kArm64Eor32:
+    case kArm64Not:
+    case kArm64Not32:
+    case kArm64Or:
+    case kArm64Or32:
+    case kArm64Orn:
+    case kArm64Orn32:
+    case kArm64Sub:
+    case kArm64Sub32:
+    case kArm64Tst:
+    case kArm64Tst32:
+      if (instr->addressing_mode() != kMode_None) {
+        return 3;
+      } else {
+        return 1;
+      }
 
-    case kArm64Float64Add:
-    case kArm64Float64Sub:
-      return 2;
-
-    case kArm64Float64Mul:
-      return 3;
-
-    case kArm64Float64Div:
-      return 6;
+    case kArm64Clz:
+    case kArm64Clz32:
+    case kArm64Sbfx32:
+    case kArm64Sxtb32:
+    case kArm64Sxth32:
+    case kArm64Sxtw:
+    case kArm64Ubfiz32:
+    case kArm64Ubfx:
+    case kArm64Ubfx32:
+      return 1;
 
     case kArm64Lsl:
     case kArm64Lsl32:
@@ -202,7 +225,17 @@
     case kArm64Asr32:
     case kArm64Ror:
     case kArm64Ror32:
-      return 3;
+      return 1;
+
+    case kArm64Ldr:
+    case kArm64LdrD:
+    case kArm64LdrS:
+    case kArm64LdrW:
+    case kArm64Ldrb:
+    case kArm64Ldrh:
+    case kArm64Ldrsb:
+    case kArm64Ldrsh:
+      return 11;
 
     case kCheckedLoadInt8:
     case kCheckedLoadUint8:
@@ -212,18 +245,94 @@
     case kCheckedLoadWord64:
     case kCheckedLoadFloat32:
     case kCheckedLoadFloat64:
-    case kArm64LdrS:
-    case kArm64LdrD:
-    case kArm64Ldrb:
-    case kArm64Ldrsb:
-    case kArm64Ldrh:
-    case kArm64Ldrsh:
-    case kArm64LdrW:
-    case kArm64Ldr:
+      return 5;
+
+    case kArm64Str:
+    case kArm64StrD:
+    case kArm64StrS:
+    case kArm64StrW:
+    case kArm64Strb:
+    case kArm64Strh:
+      return 1;
+
+    case kCheckedStoreWord8:
+    case kCheckedStoreWord16:
+    case kCheckedStoreWord32:
+    case kCheckedStoreWord64:
+    case kCheckedStoreFloat32:
+    case kCheckedStoreFloat64:
+      return 1;
+
+    case kArm64Madd32:
+    case kArm64Mneg32:
+    case kArm64Msub32:
+    case kArm64Mul32:
+      return 3;
+
+    case kArm64Madd:
+    case kArm64Mneg:
+    case kArm64Msub:
+    case kArm64Mul:
+      return 5;
+
+    case kArm64Idiv32:
+    case kArm64Udiv32:
+      return 12;
+
+    case kArm64Idiv:
+    case kArm64Udiv:
+      return 20;
+
+    case kArm64Float32Add:
+    case kArm64Float32Sub:
+    case kArm64Float64Add:
+    case kArm64Float64Sub:
+      return 5;
+
+    case kArm64Float32Abs:
+    case kArm64Float32Cmp:
+    case kArm64Float64Abs:
+    case kArm64Float64Cmp:
+    case kArm64Float64Neg:
+      return 3;
+
+    case kArm64Float32Div:
+    case kArm64Float32Sqrt:
+      return 12;
+
+    case kArm64Float64Div:
+    case kArm64Float64Sqrt:
+      return 19;
+
+    case kArm64Float32RoundDown:
+    case kArm64Float32RoundTiesEven:
+    case kArm64Float32RoundTruncate:
+    case kArm64Float32RoundUp:
+    case kArm64Float64RoundDown:
+    case kArm64Float64RoundTiesAway:
+    case kArm64Float64RoundTiesEven:
+    case kArm64Float64RoundTruncate:
+    case kArm64Float64RoundUp:
+      return 5;
+
+    case kArm64Float32ToFloat64:
+    case kArm64Float64ToFloat32:
+    case kArm64Float64ToInt32:
+    case kArm64Float64ToUint32:
+    case kArm64Float32ToInt64:
+    case kArm64Float64ToInt64:
+    case kArm64Float32ToUint64:
+    case kArm64Float64ToUint64:
+    case kArm64Int32ToFloat64:
+    case kArm64Int64ToFloat32:
+    case kArm64Int64ToFloat64:
+    case kArm64Uint32ToFloat64:
+    case kArm64Uint64ToFloat32:
+    case kArm64Uint64ToFloat64:
       return 5;
 
     default:
-      return 1;
+      return 2;
   }
 }
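
The latency switch replaces the old model, which covered only a few floating-point cases and defaulted everything else to 1, with empirically measured per-class costs: 1 cycle for plain ALU, shift, and store instructions, 3 when an addressing-mode shift or extend is folded into an ALU op, 3-5 for multiplies, 12-20 for divides, 5 for FP add/sub/round/convert, 12-19 for FP divide and square root, and a dominant 11 for ordinary loads. The scheduler can use these to order a basic block: under this model a dependent chain load -> multiply-add -> int-to-double convert costs 11 + 5 + 5 = 21 cycles, so issuing the load as early as possible shortens the critical path. A trivial check of that arithmetic, illustration only:

    constexpr int kLdrLatency = 11;   // kArm64Ldr
    constexpr int kMaddLatency = 5;   // kArm64Madd
    constexpr int kScvtfLatency = 5;  // kArm64Int64ToFloat64
    static_assert(kLdrLatency + kMaddLatency + kScvtfLatency == 21,
                  "critical path of the example chain");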
 
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index d90deae..240a4f2 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -40,7 +40,9 @@
   // Use the zero register if the node has the immediate value zero, otherwise
   // assign a register.
   InstructionOperand UseRegisterOrImmediateZero(Node* node) {
-    if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+        (IsFloatConstant(node) &&
+         (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
       return UseImmediate(node);
     }
     return UseRegister(node);
@@ -68,6 +70,19 @@
     return OpParameter<int64_t>(node);
   }
 
+  bool IsFloatConstant(Node* node) {
+    return (node->opcode() == IrOpcode::kFloat32Constant) ||
+           (node->opcode() == IrOpcode::kFloat64Constant);
+  }
+
+  double GetFloatConstantValue(Node* node) {
+    if (node->opcode() == IrOpcode::kFloat32Constant) {
+      return OpParameter<float>(node);
+    }
+    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+    return OpParameter<double>(node);
+  }
+
   bool CanBeImmediate(Node* node, ImmediateMode mode) {
     return IsIntegerConstant(node) &&
            CanBeImmediate(GetIntegerConstantValue(node), mode);
@@ -106,6 +121,13 @@
     return false;
   }
 
+  bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
+    // TODO(arm64): Load and Store on 128-bit Q registers are not supported
+    // yet.
+    DCHECK_NE(MachineRepresentation::kSimd128, rep);
+    return IsIntegerConstant(node) &&
+           (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
+  }
+
  private:
   bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
     return Assembler::IsImmLSScaled(value, size) ||
@@ -211,6 +233,28 @@
   return false;
 }
 
+bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
+                            InstructionSelector* selector,
+                            MachineRepresentation rep, Node* node, Node* index,
+                            InstructionOperand* index_op,
+                            InstructionOperand* shift_immediate_op) {
+  if (!selector->CanCover(node, index)) return false;
+  if (index->InputCount() != 2) return false;
+  Node* left = index->InputAt(0);
+  Node* right = index->InputAt(1);
+  switch (index->opcode()) {
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord64Shl:
+      if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
+        return false;
+      }
+      *index_op = g->UseRegister(left);
+      *shift_immediate_op = g->UseImmediate(right);
+      return true;
+    default:
+      return false;
+  }
+}
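
TryMatchLoadStoreShift folds an index of the form `x << k` into the scaled-register addressing mode `[base, x, LSL #k]`, which arm64 supports only when k equals log2 of the access size — hence CanBeLoadStoreShiftImmediate comparing the shift amount against ElementSizeLog2Of(rep). CanCover additionally guarantees the shift node has no other users, so folding it duplicates no work. This is the same folding a C compiler performs for array indexing — illustration, not V8 code:

    #include <cstdint>

    // For an 8-byte element the index is scaled by LSL #3; this typically
    // compiles to: ldr x0, [x0, x1, lsl #3]
    int64_t LoadScaled(const int64_t* base, int64_t index) {
      return base[index];
    }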
 
 // Shared routine for multiple binary operations.
 template <typename Matcher>
@@ -344,12 +388,16 @@
 
 void InstructionSelector::VisitLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  MachineRepresentation rep = load_rep.representation();
   Arm64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  ArchOpcode opcode = kArchNop;
+  InstructionCode opcode = kArchNop;
   ImmediateMode immediate_mode = kNoImmediate;
-  switch (load_rep.representation()) {
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  InstructionOperand outputs[1];
+  switch (rep) {
     case MachineRepresentation::kFloat32:
       opcode = kArm64LdrS;
       immediate_mode = kLoadStoreImm32;
@@ -381,13 +429,25 @@
       UNREACHABLE();
       return;
   }
+
+  outputs[0] = g.DefineAsRegister(node);
+  inputs[0] = g.UseRegister(base);
+
   if (g.CanBeImmediate(index, immediate_mode)) {
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+    input_count = 2;
+    inputs[1] = g.UseImmediate(index);
+    opcode |= AddressingModeField::encode(kMode_MRI);
+  } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[1],
+                                    &inputs[2])) {
+    input_count = 3;
+    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
   } else {
-    Emit(opcode | AddressingModeField::encode(kMode_MRR),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+    input_count = 2;
+    inputs[1] = g.UseRegister(index);
+    opcode |= AddressingModeField::encode(kMode_MRR);
   }
+
+  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
 }
 
 
@@ -441,7 +501,9 @@
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
-    ArchOpcode opcode = kArchNop;
+    InstructionOperand inputs[4];
+    size_t input_count = 0;
+    InstructionCode opcode = kArchNop;
     ImmediateMode immediate_mode = kNoImmediate;
     switch (rep) {
       case MachineRepresentation::kFloat32:
@@ -475,13 +537,25 @@
         UNREACHABLE();
         return;
     }
+
+    inputs[0] = g.UseRegisterOrImmediateZero(value);
+    inputs[1] = g.UseRegister(base);
+
     if (g.CanBeImmediate(index, immediate_mode)) {
-      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+      input_count = 3;
+      inputs[2] = g.UseImmediate(index);
+      opcode |= AddressingModeField::encode(kMode_MRI);
+    } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
+                                      &inputs[3])) {
+      input_count = 4;
+      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
     } else {
-      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
-           g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+      input_count = 3;
+      inputs[2] = g.UseRegister(index);
+      opcode |= AddressingModeField::encode(kMode_MRR);
     }
+
+    Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -559,7 +633,8 @@
       return;
   }
   Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
-       g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
+       g.UseOperand(length, kArithmeticImm),
+       g.UseRegisterOrImmediateZero(value));
 }
 
 
@@ -1396,6 +1471,20 @@
       Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
       return;
     }
+    case IrOpcode::kLoad: {
+      // As for the operations above, a 32-bit load will implicitly clear the
+      // top 32 bits of the destination register.
+      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+      switch (load_rep.representation()) {
+        case MachineRepresentation::kWord8:
+        case MachineRepresentation::kWord16:
+        case MachineRepresentation::kWord32:
+          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+          return;
+        default:
+          break;
+      }
+    }
     default:
       break;
   }
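
The added kLoad case relies on an architectural guarantee: any arm64 load that writes a w register clears bits 63:32 of the destination, so ChangeUint32ToUint64 of such a load needs no code at all — the selector emits kArchNop and aliases the output to the input. Even the signed narrow loads (Ldrsb/Ldrsh into a w register) only sign-extend within the low 32 bits and still zero the upper half, which is why kWord8 and kWord16 qualify regardless of signedness. Illustration, not V8 code:

    #include <cstdint>

    uint64_t LoadAndExtend(const uint32_t* p) {
      uint32_t w = *p;                  // ldr w0, [x0] clears bits 63:32
      return static_cast<uint64_t>(w);  // no extra instruction needed
    }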
@@ -1407,15 +1496,12 @@
   VisitRR(this, kArm64Float64ToFloat32, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, kArchTruncateDoubleToI, node);
+}
 
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, kArchTruncateDoubleToI, node);
-    case TruncationMode::kRoundToZero:
-      return VisitRR(this, kArm64Float64ToInt32, node);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRR(this, kArm64Float64ToInt32, node);
 }
 
 
@@ -1491,6 +1577,9 @@
   VisitRRR(this, kArm64Float32Sub, node);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitRRR(this, kArm64Float32Sub, node);
+}
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   Arm64OperandGenerator g(this);
@@ -1515,6 +1604,9 @@
   VisitRRR(this, kArm64Float64Sub, node);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitRRR(this, kArm64Float64Sub, node);
+}
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kArm64Float32Mul, node);
@@ -2246,6 +2338,61 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode | AddressingModeField::encode(kMode_MRR),
+       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kAtomicStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kAtomicStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicStoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  AddressingMode addressing_mode = kMode_MRR;
+  InstructionOperand inputs[3];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  inputs[input_count++] = g.UseUniqueRegister(index);
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 0, nullptr, input_count, inputs);
+}
 
 // static
 MachineOperatorBuilder::Flags
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index 89bb619..da8b626 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -492,6 +492,12 @@
     // Contexts nested in the native context have a canonical empty function as
     // their closure, not the anonymous closure containing the global code.
     return BuildLoadNativeContextField(Context::CLOSURE_INDEX);
+  } else if (closure_scope->is_eval_scope()) {
+    // Contexts nested inside eval code have the same closure as the context
+    // calling eval, not the anonymous closure containing the eval code.
+    const Operator* op =
+        javascript()->LoadContext(0, Context::CLOSURE_INDEX, false);
+    return NewNode(op, current_context());
   } else {
     DCHECK(closure_scope->is_function_scope());
     return GetFunctionClosure();
@@ -568,7 +574,7 @@
   }
 
   // Build local context only if there are context allocated variables.
-  if (info()->num_heap_slots() > 0) {
+  if (scope->num_heap_slots() > 0) {
     // Push a new inner context scope for the current activation.
     Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
     ContextScope top_context(this, scope, inner_context);
@@ -1083,17 +1089,14 @@
 void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   VariableMode mode = decl->mode();
-  bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+  bool hole_init = mode == CONST || mode == LET;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
-    case VariableLocation::UNALLOCATED: {
-      Handle<Oddball> value = variable->binding_needs_init()
-                                  ? isolate()->factory()->the_hole_value()
-                                  : isolate()->factory()->undefined_value();
+    case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals()->push_back(variable->name());
-      globals()->push_back(value);
+      globals()->push_back(isolate()->factory()->undefined_value());
       break;
-    }
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL:
       if (hole_init) {
@@ -1108,8 +1111,22 @@
         NewNode(op, current_context(), value);
       }
       break;
-    case VariableLocation::LOOKUP:
-      UNIMPLEMENTED();
+    case VariableLocation::LOOKUP: {
+      Node* name = jsgraph()->Constant(variable->name());
+      // For variables we must not push an initial value (such as 'undefined')
+      // because we may have a (legal) redeclaration and we must not destroy
+      // the current value.
+      Node* value =
+          hole_init ? jsgraph()->TheHoleConstant()
+                    : jsgraph()->ZeroConstant();  // Indicates no initial value.
+      Node* attr =
+          jsgraph()->Constant(variable->DeclarationPropertyAttributes());
+      const Operator* op =
+          javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
+      Node* store = NewNode(op, name, value, attr);
+      PrepareFrameState(store, decl->proxy()->id());
+      break;
+    }
   }
 }
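
A note on the hole_init flag used above: let and const bindings are pre-initialized with the_hole so that a read before the declaration executes can be detected, and the load path (BuildVariableLoad further down) turns such a read into a ReferenceError via BuildHoleCheckThenThrow. A minimal standalone model of that contract, with std::optional standing in for the hole (not V8 code):

    #include <cassert>
    #include <optional>
    #include <stdexcept>
    #include <string>

    // std::nullopt plays the role of the_hole: the binding exists but has
    // not yet been initialized (the "temporal dead zone").
    using Binding = std::optional<int>;

    int LoadLetOrConst(const Binding& b, const std::string& name) {
      if (!b.has_value()) {
        // Mirrors BuildHoleCheckThenThrow: reading an uninitialized
        // let/const binding throws instead of yielding a value.
        throw std::runtime_error("ReferenceError: " + name);
      }
      return *b;
    }

    int main() {
      Binding x;  // declared, still in the dead zone
      bool threw = false;
      try { LoadLetOrConst(x, "x"); } catch (const std::runtime_error&) { threw = true; }
      assert(threw);
      x = 42;  // initialization ends the dead zone
      assert(LoadLetOrConst(x, "x") == 42);
    }
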
 
@@ -1141,8 +1158,18 @@
       NewNode(op, current_context(), value);
       break;
     }
-    case VariableLocation::LOOKUP:
-      UNIMPLEMENTED();
+    case VariableLocation::LOOKUP: {
+      VisitForValue(decl->fun());
+      Node* value = environment()->Pop();
+      Node* name = jsgraph()->Constant(variable->name());
+      Node* attr =
+          jsgraph()->Constant(variable->DeclarationPropertyAttributes());
+      const Operator* op =
+          javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
+      Node* store = NewNode(op, name, value, attr);
+      PrepareFrameState(store, decl->proxy()->id());
+      break;
+    }
   }
 }
 
@@ -1398,10 +1425,10 @@
         VisitIterationBody(stmt, &for_loop);
       }
       test_value.End();
-      index = environment()->Peek(0);
       for_loop.EndBody();
 
       // Increment counter and continue.
+      index = environment()->Peek(0);
       index = NewNode(javascript()->ForInStep(), index);
       environment()->Poke(0, index);
     }
@@ -1640,12 +1667,11 @@
     }
   }
 
-  // Set both the prototype and constructor to have fast properties.
+  // Set the constructor to have fast properties.
   prototype = environment()->Pop();
   literal = environment()->Pop();
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kFinalizeClassDefinition);
-  literal = NewNode(op, literal, prototype);
+  const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
+  literal = NewNode(op, literal);
 
   // Assign to class variable.
   if (expr->class_variable_proxy() != nullptr) {
@@ -2225,7 +2251,7 @@
 
 
 void AstGraphBuilder::VisitYield(Yield* expr) {
-  // TODO(turbofan): Implement yield here.
+  // Generator functions are supported only by going through Ignition first.
   SetStackOverflow();
   ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
 }
@@ -2456,11 +2482,13 @@
     // provide a fully resolved callee to patch into the environment.
     Node* function = GetFunctionClosure();
     Node* language = jsgraph()->Constant(language_mode());
-    Node* position = jsgraph()->Constant(current_scope()->start_position());
+    Node* eval_scope_position =
+        jsgraph()->Constant(current_scope()->start_position());
+    Node* eval_position = jsgraph()->Constant(expr->position());
     const Operator* op =
         javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
-    Node* new_callee =
-        NewNode(op, callee, source, function, language, position);
+    Node* new_callee = NewNode(op, callee, source, function, language,
+                               eval_scope_position, eval_position);
     PrepareFrameState(new_callee, expr->EvalId(),
                       OutputFrameStateCombine::PokeAt(arg_count + 1));
 
@@ -2873,7 +2901,6 @@
       op = javascript()->GreaterThanOrEqual();
       break;
     case Token::INSTANCEOF:
-      DCHECK(!FLAG_harmony_instanceof);
       op = javascript()->InstanceOf();
       break;
     case Token::IN:
@@ -2939,9 +2966,7 @@
   Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
       static_cast<int>(globals()->size()), TENURED);
   for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
-  int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
-                      DeclareGlobalsNativeFlag::encode(info()->is_native()) |
-                      DeclareGlobalsLanguageMode::encode(language_mode());
+  int encoded_flags = info()->GetDeclareGlobalsFlags();
   Node* flags = jsgraph()->Constant(encoded_flags);
   Node* pairs = jsgraph()->Constant(data);
   const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
@@ -3183,7 +3208,7 @@
 
 
 Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
-  DCHECK(scope->is_function_scope());
+  DCHECK(scope->is_function_scope() || scope->is_eval_scope());
 
   // Allocate a new local context.
   int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
@@ -3291,16 +3316,6 @@
 }
 
 
-Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
-                                            Node* not_hole) {
-  Node* the_hole = jsgraph()->TheHoleConstant();
-  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
-  return NewNode(
-      common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
-      check, for_hole, not_hole);
-}
-
-
 Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
                                                Node* not_hole,
                                                BailoutId bailout_id) {
@@ -3374,15 +3389,7 @@
     case VariableLocation::LOCAL: {
       // Local var, const, or let variable.
       Node* value = environment()->Lookup(variable);
-      if (mode == CONST_LEGACY) {
-        // Perform check for uninitialized legacy const variables.
-        if (value->op() == the_hole->op()) {
-          value = jsgraph()->UndefinedConstant();
-        } else if (value->opcode() == IrOpcode::kPhi) {
-          Node* undefined = jsgraph()->UndefinedConstant();
-          value = BuildHoleCheckSilent(value, undefined, value);
-        }
-      } else if (mode == LET || mode == CONST) {
+      if (mode == LET || mode == CONST) {
         // Perform check for uninitialized let/const variables.
         if (value->op() == the_hole->op()) {
           value = BuildThrowReferenceError(variable, bailout_id);
@@ -3402,11 +3409,7 @@
       // TODO(titzer): initialization checks are redundant for already
       // initialized immutable context loads, but only specialization knows.
       // Maybe specializer should be a parameter to the graph builder?
-      if (mode == CONST_LEGACY) {
-        // Perform check for uninitialized legacy const variables.
-        Node* undefined = jsgraph()->UndefinedConstant();
-        value = BuildHoleCheckSilent(value, undefined, value);
-      } else if (mode == LET || mode == CONST) {
+      if (mode == LET || mode == CONST) {
         // Perform check for uninitialized let/const variables.
         value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
       }
@@ -3483,13 +3486,7 @@
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL:
       // Local var, const, or let variable.
-      if (mode == CONST_LEGACY && op == Token::INIT) {
-        // Perform an initialization check for legacy const variables.
-        Node* current = environment()->Lookup(variable);
-        if (current->op() != the_hole->op()) {
-          value = BuildHoleCheckSilent(current, value, current);
-        }
-      } else if (mode == CONST_LEGACY && op != Token::INIT) {
+      if (mode == CONST_LEGACY && op != Token::INIT) {
         // Non-initializing assignment to legacy const is
         // - exception in strict mode.
         // - ignored in sloppy mode.
@@ -3534,13 +3531,7 @@
     case VariableLocation::CONTEXT: {
       // Context variable (potentially up the context chain).
       int depth = current_scope()->ContextChainLength(variable->scope());
-      if (mode == CONST_LEGACY && op == Token::INIT) {
-        // Perform an initialization check for legacy const variables.
-        const Operator* op =
-            javascript()->LoadContext(depth, variable->index(), false);
-        Node* current = NewNode(op, current_context());
-        value = BuildHoleCheckSilent(current, value, current);
-      } else if (mode == CONST_LEGACY && op != Token::INIT) {
+      if (mode == CONST_LEGACY && op != Token::INIT) {
         // Non-initializing assignment to legacy const is
         // - exception in strict mode.
         // - ignored in sloppy mode.
@@ -3578,8 +3569,6 @@
     case VariableLocation::LOOKUP: {
       // Dynamic lookup of context variable (anywhere in the chain).
       Handle<Name> name = variable->name();
-      // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
-      // initializations of const declarations.
       Node* store = BuildDynamicStore(name, value);
       PrepareFrameState(store, bailout_id, combine);
       return store;
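
The hunks above retire the CONST_LEGACY handling together with BuildHoleCheckSilent, which, unlike the throwing variant that remains, replaced a hole with a fallback value (typically undefined) through a branch-free Select node. A standalone sketch of the removed semantics, with simplified types (not V8 code):

    #include <cassert>

    // Toy tagged value; is_hole marks the uninitialized sentinel.
    struct Value { bool is_hole; int payload; };

    // Models the removed BuildHoleCheckSilent:
    //   Select(StrictEqual(value, the_hole), for_hole, not_hole)
    Value HoleCheckSilent(Value value, Value for_hole) {
      return value.is_hole ? for_hole : value;
    }

    int main() {
      Value undefined{false, 0};
      Value hole{true, 0};
      Value initialized{false, 42};
      assert(HoleCheckSilent(hole, undefined).payload == 0);         // hole -> undefined
      assert(HoleCheckSilent(initialized, undefined).payload == 42); // passes through
    }
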
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index e206db0..1d0fc90 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -15,7 +15,7 @@
 
 // Forward declarations.
 class BitVector;
-
+class CompilationInfo;
 
 namespace compiler {
 
@@ -341,7 +341,6 @@
   Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
 
   // Builders for dynamic hole-checks at runtime.
-  Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
   Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
                                 BailoutId bailout_id);
   Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index ac96399..334c597 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -265,8 +265,9 @@
 void ALAA::VisitForOfStatement(ForOfStatement* loop) {
   Visit(loop->assign_iterator());
   Enter(loop);
+  Visit(loop->next_result());
+  Visit(loop->result_done());
   Visit(loop->assign_each());
-  Visit(loop->subject());
   Visit(loop->body());
   Exit(loop);
 }
diff --git a/src/compiler/ast-loop-assignment-analyzer.h b/src/compiler/ast-loop-assignment-analyzer.h
index 1696911..a4a4609 100644
--- a/src/compiler/ast-loop-assignment-analyzer.h
+++ b/src/compiler/ast-loop-assignment-analyzer.h
@@ -12,8 +12,9 @@
 namespace v8 {
 namespace internal {
 
-class Variable;
+class CompilationInfo;
 class Scope;
+class Variable;
 
 namespace compiler {
 
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index 2249cbc..22299de 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -882,7 +882,9 @@
   Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
+  int literal_flags =
+      interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
   // TODO(mstarzinger): Thread through number of properties.
   int number_of_properties = constant_properties->length() / 2;
   const Operator* op = javascript()->CreateLiteralObject(
@@ -1121,9 +1123,11 @@
 
 void BytecodeGraphBuilder::VisitInc() {
   FrameStateBeforeAndAfter states(this);
-  const Operator* js_op = javascript()->Add(BinaryOperationHints::Any());
+  // Note: We subtract -1 here instead of adding 1 to ensure the operand is
+  // always converted to a number, not concatenated as a string.
+  const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
-                       jsgraph()->OneConstant());
+                       jsgraph()->Constant(-1.0));
   environment()->BindAccumulator(node, &states);
 }
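
Why the Subtract(-1) trick above works: JavaScript's binary + concatenates when either operand is a string, while binary - always applies ToNumber to both operands, and x - (-1) equals x + 1 on numbers. A toy standalone model of the two operators (simplified tagged value, not V8 code):

    #include <cassert>
    #include <string>

    // Toy tagged value: either a number or a string, mirroring why the
    // bytecode graph builder lowers Inc as Subtract(-1) rather than Add(1).
    struct Tagged {
      bool is_string;
      double num;
      std::string str;
    };

    Tagged Add(const Tagged& a, const Tagged& b) {
      if (a.is_string || b.is_string) {
        // JS '+' concatenates if either side is a string.
        auto s = [](const Tagged& t) { return t.is_string ? t.str : std::to_string(t.num); };
        return {true, 0, s(a) + s(b)};
      }
      return {false, a.num + b.num, ""};
    }

    Tagged Sub(const Tagged& a, const Tagged& b) {
      // JS '-' always converts both operands to numbers.
      auto n = [](const Tagged& t) { return t.is_string ? std::stod(t.str) : t.num; };
      return {false, n(a) - n(b), ""};
    }

    int main() {
      Tagged one_str{true, 0, "1"}, one{false, 1, ""}, minus_one{false, -1, ""};
      assert(Add(one_str, one).is_string);         // "1" + 1  -> "11", a string
      assert(Sub(one_str, minus_one).num == 2.0);  // "1" - (-1) -> 2, a number
    }
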
 
@@ -1136,6 +1140,13 @@
 }
 
 void BytecodeGraphBuilder::VisitLogicalNot() {
+  Node* value = environment()->LookupAccumulator();
+  Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
+                       jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+  environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitToBooleanLogicalNot() {
   Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
                         environment()->LookupAccumulator());
   Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
@@ -1209,7 +1220,6 @@
 }
 
 void BytecodeGraphBuilder::VisitTestInstanceOf() {
-  DCHECK(!FLAG_harmony_instanceof);
   BuildCompareOp(javascript()->InstanceOf());
 }
 
@@ -1362,6 +1372,48 @@
   environment()->BindAccumulator(index, &states);
 }
 
+void BytecodeGraphBuilder::VisitSuspendGenerator() {
+  Node* state = environment()->LookupAccumulator();
+  Node* generator = environment()->LookupRegister(
+      bytecode_iterator().GetRegisterOperand(0));
+
+  for (int i = 0; i < environment()->register_count(); ++i) {
+    Node* value = environment()->LookupRegister(interpreter::Register(i));
+    NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
+        generator, jsgraph()->Constant(i), value);
+  }
+
+  NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContext), generator);
+  NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
+      generator, state);
+}
+
+void BytecodeGraphBuilder::VisitResumeGenerator() {
+  FrameStateBeforeAndAfter states(this);
+
+  Node* generator = environment()->LookupRegister(
+      bytecode_iterator().GetRegisterOperand(0));
+  Node* state = NewNode(javascript()->CallRuntime(
+      Runtime::kGeneratorGetContinuation), generator);
+
+  // The bijection between registers and array indices must match the one used
+  // in InterpreterAssembler::ExportRegisterFile.
+  for (int i = 0; i < environment()->register_count(); ++i) {
+    Node* value = NewNode(
+        javascript()->CallRuntime(Runtime::kGeneratorLoadRegister),
+        generator, jsgraph()->Constant(i));
+    environment()->BindRegister(interpreter::Register(i), value);
+
+    NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
+        generator, jsgraph()->Constant(i), jsgraph()->StaleRegisterConstant());
+  }
+
+  NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
+      generator, jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
+
+  environment()->BindAccumulator(state, &states);
+}
+
 void BytecodeGraphBuilder::VisitWide() {
   // Consumed by the BytecodeArrayIterator.
   UNREACHABLE();
@@ -1373,10 +1425,12 @@
 }
 
 void BytecodeGraphBuilder::VisitIllegal() {
-  // Never present in valid bytecode.
+  // Not emitted in valid bytecode.
   UNREACHABLE();
 }
 
+void BytecodeGraphBuilder::VisitNop() {}
+
 void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
   if (merge_environments_[current_offset] != nullptr) {
     if (environment() != nullptr) {
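
On the SuspendGenerator and ResumeGenerator visitors added above: suspension spills every interpreter register into the generator object and records the continuation state; resumption reads the state back, restores each register, poisons the stored copy with the stale-register sentinel, and marks the generator as executing. A standalone sketch of that save/restore contract (simplified storage, not V8 code):

    #include <cassert>
    #include <vector>

    // Toy generator state: the register file is exported to (and later
    // re-imported from) a side array keyed by register index.
    struct Generator { std::vector<int> saved; int continuation; };

    void Suspend(Generator& g, const std::vector<int>& regs, int state) {
      g.saved = regs;          // GeneratorStoreRegister for each i
      g.continuation = state;  // GeneratorSetContinuation
    }

    int Resume(Generator& g, std::vector<int>& regs, int kExecuting, int kStale) {
      int state = g.continuation;  // GeneratorGetContinuation
      for (size_t i = 0; i < regs.size(); ++i) {
        regs[i] = g.saved[i];      // GeneratorLoadRegister
        g.saved[i] = kStale;       // poison the stored copy
      }
      g.continuation = kExecuting;
      return state;
    }

    int main() {
      Generator g;
      std::vector<int> regs{7, 8, 9};
      Suspend(g, regs, /*state=*/1);
      std::vector<int> fresh(3, 0);
      assert(Resume(g, fresh, /*kExecuting=*/-2, /*kStale=*/-1) == 1);
      assert(fresh[2] == 9 && g.saved[0] == -1);
    }
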
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
deleted file mode 100644
index 907b36a..0000000
--- a/src/compiler/change-lowering.cc
+++ /dev/null
@@ -1,713 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/change-lowering.h"
-
-#include "src/address-map.h"
-#include "src/code-factory.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/operator-properties.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-ChangeLowering::~ChangeLowering() {}
-
-
-Reduction ChangeLowering::Reduce(Node* node) {
-  Node* control = graph()->start();
-  switch (node->opcode()) {
-    case IrOpcode::kChangeBitToBool:
-      return ChangeBitToBool(node->InputAt(0), control);
-    case IrOpcode::kChangeBoolToBit:
-      return ChangeBoolToBit(node->InputAt(0));
-    case IrOpcode::kChangeFloat64ToTagged:
-      return ChangeFloat64ToTagged(node->InputAt(0), control);
-    case IrOpcode::kChangeInt32ToTagged:
-      return ChangeInt32ToTagged(node->InputAt(0), control);
-    case IrOpcode::kChangeTaggedToFloat64:
-      return ChangeTaggedToFloat64(node->InputAt(0), control);
-    case IrOpcode::kChangeTaggedToInt32:
-      return ChangeTaggedToUI32(node->InputAt(0), control, kSigned);
-    case IrOpcode::kChangeTaggedToUint32:
-      return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
-    case IrOpcode::kChangeUint32ToTagged:
-      return ChangeUint32ToTagged(node->InputAt(0), control);
-    case IrOpcode::kLoadField:
-      return LoadField(node);
-    case IrOpcode::kStoreField:
-      return StoreField(node);
-    case IrOpcode::kLoadElement:
-      return LoadElement(node);
-    case IrOpcode::kStoreElement:
-      return StoreElement(node);
-    case IrOpcode::kAllocate:
-      return Allocate(node);
-    case IrOpcode::kObjectIsReceiver:
-      return ObjectIsReceiver(node);
-    case IrOpcode::kObjectIsSmi:
-      return ObjectIsSmi(node);
-    case IrOpcode::kObjectIsNumber:
-      return ObjectIsNumber(node);
-    case IrOpcode::kObjectIsUndetectable:
-      return ObjectIsUndetectable(node);
-    default:
-      return NoChange();
-  }
-  UNREACHABLE();
-  return NoChange();
-}
-
-
-Node* ChangeLowering::HeapNumberValueIndexConstant() {
-  return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
-}
-
-
-Node* ChangeLowering::SmiMaxValueConstant() {
-  return jsgraph()->Int32Constant(Smi::kMaxValue);
-}
-
-
-Node* ChangeLowering::SmiShiftBitsConstant() {
-  return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
-
-Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
-  // The AllocateHeapNumberStub does not use the context, so we can safely pass
-  // in Smi zero here.
-  Callable callable = CodeFactory::AllocateHeapNumber(isolate());
-  Node* target = jsgraph()->HeapConstant(callable.code());
-  Node* context = jsgraph()->NoContextConstant();
-  Node* effect = graph()->NewNode(common()->BeginRegion(), graph()->start());
-  if (!allocate_heap_number_operator_.is_set()) {
-    CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-        isolate(), jsgraph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNoFlags, Operator::kNoThrow);
-    allocate_heap_number_operator_.set(common()->Call(descriptor));
-  }
-  Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
-                                       target, context, effect, control);
-  Node* store = graph()->NewNode(
-      machine()->Store(StoreRepresentation(MachineRepresentation::kFloat64,
-                                           kNoWriteBarrier)),
-      heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
-  return graph()->NewNode(common()->FinishRegion(), heap_number, store);
-}
-
-
-Node* ChangeLowering::ChangeInt32ToFloat64(Node* value) {
-  return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
-}
-
-
-Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
-  if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
-  }
-  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-
-Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
-  return ChangeInt32ToFloat64(ChangeSmiToInt32(value));
-}
-
-
-Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
-  value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
-  if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
-  }
-  return value;
-}
-
-
-Node* ChangeLowering::ChangeUint32ToFloat64(Node* value) {
-  return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
-}
-
-
-Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
-  if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
-  }
-  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-
-Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
-  return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
-                          HeapNumberValueIndexConstant(), graph()->start(),
-                          control);
-}
-
-
-Node* ChangeLowering::TestNotSmi(Node* value) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagMask == 1);
-  return graph()->NewNode(machine()->WordAnd(), value,
-                          jsgraph()->IntPtrConstant(kSmiTagMask));
-}
-
-
-Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
-  return Replace(
-      graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
-                       jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
-}
-
-
-Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
-  return Replace(graph()->NewNode(machine()->WordEqual(), value,
-                                  jsgraph()->TrueConstant()));
-}
-
-
-Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
-  Type* const value_type = NodeProperties::GetType(value);
-  Node* const value32 = graph()->NewNode(
-      machine()->TruncateFloat64ToInt32(TruncationMode::kRoundToZero), value);
-  // TODO(bmeurer): This fast case must be disabled until we kill the asm.js
-  // support in the generic JavaScript pipeline, because LoadBuffer is lying
-  // about its result.
-  // if (value_type->Is(Type::Signed32())) {
-  //   return ChangeInt32ToTagged(value32, control);
-  // }
-  Node* check_same = graph()->NewNode(
-      machine()->Float64Equal(), value,
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
-  Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
-
-  Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
-  Node* vsmi;
-  Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
-  Node* vbox;
-
-  // We only need to check for -0 if the {value} can potentially contain -0.
-  if (value_type->Maybe(Type::MinusZero())) {
-    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
-                                        jsgraph()->Int32Constant(0));
-    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check_zero, if_smi);
-
-    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-    Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
-
-    // In case of 0, we need to check the high bits for the IEEE -0 pattern.
-    Node* check_negative = graph()->NewNode(
-        machine()->Int32LessThan(),
-        graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-        jsgraph()->Int32Constant(0));
-    Node* branch_negative = graph()->NewNode(
-        common()->Branch(BranchHint::kFalse), check_negative, if_zero);
-
-    Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
-    Node* if_notnegative =
-        graph()->NewNode(common()->IfFalse(), branch_negative);
-
-    // We need to create a box for negative 0.
-    if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
-    if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
-  }
-
-  // On 64-bit machines we can just wrap the 32-bit integer in a smi; on 32-bit
-  // machines we need to deal with potential overflow and fall back to boxing.
-  if (machine()->Is64() || value_type->Is(Type::SignedSmall())) {
-    vsmi = ChangeInt32ToSmi(value32);
-  } else {
-    Node* smi_tag =
-        graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
-
-    Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
-    Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                        check_ovf, if_smi);
-
-    Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
-    if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
-
-    if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
-    vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
-  }
-
-  // Allocate the box for the {value}.
-  vbox = AllocateHeapNumberWithValue(value, if_box);
-
-  control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vsmi, vbox, control);
-  return Replace(value);
-}
-
-
-Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
-  if (machine()->Is64() ||
-      NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
-    return Replace(ChangeInt32ToSmi(value));
-  }
-
-  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
-
-  Node* ovf = graph()->NewNode(common()->Projection(1), add);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue =
-      AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), if_true);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(common()->Projection(0), add);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               vtrue, vfalse, merge);
-
-  return Replace(phi);
-}
-
-
-Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
-                                             Signedness signedness) {
-  if (NodeProperties::GetType(value)->Is(Type::TaggedSigned())) {
-    return Replace(ChangeSmiToInt32(value));
-  }
-
-  const Operator* op = (signedness == kSigned)
-                           ? machine()->ChangeFloat64ToInt32()
-                           : machine()->ChangeFloat64ToUint32();
-
-  if (NodeProperties::GetType(value)->Is(Type::TaggedPointer())) {
-    return Replace(graph()->NewNode(op, LoadHeapNumberValue(value, control)));
-  }
-
-  Node* check = TestNotSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = graph()->NewNode(op, LoadHeapNumberValue(value, if_true));
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = ChangeSmiToInt32(value);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                               vtrue, vfalse, merge);
-
-  return Replace(phi);
-}
-
-
-namespace {
-
-bool CanCover(Node* value, IrOpcode::Value opcode) {
-  if (value->opcode() != opcode) return false;
-  bool first = true;
-  for (Edge const edge : value->use_edges()) {
-    if (NodeProperties::IsControlEdge(edge)) continue;
-    if (NodeProperties::IsEffectEdge(edge)) continue;
-    DCHECK(NodeProperties::IsValueEdge(edge));
-    if (!first) return false;
-    first = false;
-  }
-  return true;
-}
-
-}  // namespace
-
-
-Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
-  if (CanCover(value, IrOpcode::kJSToNumber)) {
-    // ChangeTaggedToFloat64(JSToNumber(x)) =>
-    //   if IsSmi(x) then ChangeSmiToFloat64(x)
-    //   else let y = JSToNumber(x) in
-    //     if IsSmi(y) then ChangeSmiToFloat64(y)
-    //     else LoadHeapNumberValue(y)
-    Node* const object = NodeProperties::GetValueInput(value, 0);
-    Node* const context = NodeProperties::GetContextInput(value);
-    Node* const frame_state = NodeProperties::GetFrameStateInput(value, 0);
-    Node* const effect = NodeProperties::GetEffectInput(value);
-    Node* const control = NodeProperties::GetControlInput(value);
-
-    const Operator* merge_op = common()->Merge(2);
-    const Operator* ephi_op = common()->EffectPhi(2);
-    const Operator* phi_op = common()->Phi(MachineRepresentation::kFloat64, 2);
-
-    Node* check1 = TestNotSmi(object);
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
-                                    effect, if_true1);
-    Node* etrue1 = vtrue1;
-
-    Node* check2 = TestNotSmi(vtrue1);
-    Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);
-
-    Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-    Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);
-
-    Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-    Node* vfalse2 = ChangeSmiToFloat64(vtrue1);
-
-    if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
-    vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1 = ChangeSmiToFloat64(object);
-    Node* efalse1 = effect;
-
-    Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
-    Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
-    Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
-
-    // Wire the new diamond into the graph; {JSToNumber} can still throw.
-    NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
-
-    // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
-    // the node and places it inside the diamond. Come up with a helper method!
-    for (Node* use : etrue1->uses()) {
-      if (use->opcode() == IrOpcode::kIfSuccess) {
-        use->ReplaceUses(merge1);
-        NodeProperties::ReplaceControlInput(branch2, use);
-      }
-    }
-
-    return Replace(phi1);
-  }
-
-  Node* check = TestNotSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = LoadHeapNumberValue(value, if_true);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = ChangeSmiToFloat64(value);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(
-      common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
-
-  return Replace(phi);
-}
-
-
-Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
-  if (NodeProperties::GetType(value)->Is(Type::UnsignedSmall())) {
-    return Replace(ChangeUint32ToSmi(value));
-  }
-
-  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
-                                 SmiMaxValueConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = ChangeUint32ToSmi(value);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse =
-      AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               vtrue, vfalse, merge);
-
-  return Replace(phi);
-}
-
-
-namespace {
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
-                                         MachineRepresentation representation,
-                                         Type* field_type, Type* input_type) {
-  if (field_type->Is(Type::TaggedSigned()) ||
-      input_type->Is(Type::TaggedSigned())) {
-    // Write barriers are only for writes of heap objects.
-    return kNoWriteBarrier;
-  }
-  if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
-    // Write barriers are not necessary when storing true, false, null or
-    // undefined, because these special oddballs are always in the root set.
-    return kNoWriteBarrier;
-  }
-  if (base_is_tagged == kTaggedBase &&
-      representation == MachineRepresentation::kTagged) {
-    if (input_type->IsConstant() &&
-        input_type->AsConstant()->Value()->IsHeapObject()) {
-      Handle<HeapObject> input =
-          Handle<HeapObject>::cast(input_type->AsConstant()->Value());
-      if (input->IsMap()) {
-        // Write barriers for storing maps are cheaper.
-        return kMapWriteBarrier;
-      }
-      Isolate* const isolate = input->GetIsolate();
-      RootIndexMap root_index_map(isolate);
-      int root_index = root_index_map.Lookup(*input);
-      if (root_index != RootIndexMap::kInvalidRootIndex &&
-          isolate->heap()->RootIsImmortalImmovable(root_index)) {
-        // Write barriers are unnecessary for immortal immovable roots.
-        return kNoWriteBarrier;
-      }
-    }
-    if (field_type->Is(Type::TaggedPointer()) ||
-        input_type->Is(Type::TaggedPointer())) {
-      // Write barriers for heap objects don't need a Smi check.
-      return kPointerWriteBarrier;
-    }
-    // Write barriers are only for writes into heap objects (i.e. tagged base).
-    return kFullWriteBarrier;
-  }
-  return kNoWriteBarrier;
-}
-
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
-                                         MachineRepresentation representation,
-                                         int field_offset, Type* field_type,
-                                         Type* input_type) {
-  if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
-    // Write barriers for storing maps are cheaper.
-    return kMapWriteBarrier;
-  }
-  return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
-                                 input_type);
-}
-
-}  // namespace
-
-
-Reduction ChangeLowering::LoadField(Node* node) {
-  const FieldAccess& access = FieldAccessOf(node->op());
-  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
-  node->InsertInput(graph()->zone(), 1, offset);
-  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
-  return Changed(node);
-}
-
-
-Reduction ChangeLowering::StoreField(Node* node) {
-  const FieldAccess& access = FieldAccessOf(node->op());
-  Type* type = NodeProperties::GetType(node->InputAt(1));
-  WriteBarrierKind kind = ComputeWriteBarrierKind(
-      access.base_is_tagged, access.machine_type.representation(),
-      access.offset, access.type, type);
-  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
-  node->InsertInput(graph()->zone(), 1, offset);
-  NodeProperties::ChangeOp(node,
-                           machine()->Store(StoreRepresentation(
-                               access.machine_type.representation(), kind)));
-  return Changed(node);
-}
-
-
-Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
-                                   Node* const key) {
-  Node* index = key;
-  const int element_size_shift =
-      ElementSizeLog2Of(access.machine_type.representation());
-  if (element_size_shift) {
-    index = graph()->NewNode(machine()->Word32Shl(), index,
-                             jsgraph()->Int32Constant(element_size_shift));
-  }
-  const int fixed_offset = access.header_size - access.tag();
-  if (fixed_offset) {
-    index = graph()->NewNode(machine()->Int32Add(), index,
-                             jsgraph()->Int32Constant(fixed_offset));
-  }
-  if (machine()->Is64()) {
-    // TODO(turbofan): This is probably only correct for typed arrays, and only
-    // if the typed arrays are at most 2GiB in size, which happens to match
-    // exactly our current situation.
-    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
-  }
-  return index;
-}
-
-
-Reduction ChangeLowering::LoadElement(Node* node) {
-  const ElementAccess& access = ElementAccessOf(node->op());
-  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
-  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
-  return Changed(node);
-}
-
-
-Reduction ChangeLowering::StoreElement(Node* node) {
-  const ElementAccess& access = ElementAccessOf(node->op());
-  Type* type = NodeProperties::GetType(node->InputAt(2));
-  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(
-                access.machine_type.representation(),
-                ComputeWriteBarrierKind(access.base_is_tagged,
-                                        access.machine_type.representation(),
-                                        access.type, type))));
-  return Changed(node);
-}
-
-
-Reduction ChangeLowering::Allocate(Node* node) {
-  PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
-  if (pretenure == NOT_TENURED) {
-    Callable callable = CodeFactory::AllocateInNewSpace(isolate());
-    Node* target = jsgraph()->HeapConstant(callable.code());
-    CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-        isolate(), jsgraph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNoFlags, Operator::kNoThrow);
-    const Operator* op = common()->Call(descriptor);
-    node->InsertInput(graph()->zone(), 0, target);
-    node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
-    NodeProperties::ChangeOp(node, op);
-  } else {
-    DCHECK_EQ(TENURED, pretenure);
-    AllocationSpace space = OLD_SPACE;
-    Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
-    Operator::Properties props = node->op()->properties();
-    CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-        jsgraph()->zone(), f, 2, props, CallDescriptor::kNeedsFrameState);
-    ExternalReference ref(f, jsgraph()->isolate());
-    int32_t flags = AllocateTargetSpace::encode(space);
-    node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
-    node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
-    node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
-    NodeProperties::ChangeOp(node, common()->Call(desc));
-  }
-  return Changed(node);
-}
-
-Node* ChangeLowering::IsSmi(Node* value) {
-  return graph()->NewNode(
-      machine()->WordEqual(),
-      graph()->NewNode(machine()->WordAnd(), value,
-                       jsgraph()->IntPtrConstant(kSmiTagMask)),
-      jsgraph()->IntPtrConstant(kSmiTag));
-}
-
-Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
-  return graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), object,
-      jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
-      graph()->start(), control);
-}
-
-Node* ChangeLowering::LoadMapBitField(Node* map) {
-  return graph()->NewNode(
-      machine()->Load(MachineType::Uint8()), map,
-      jsgraph()->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag),
-      graph()->start(), graph()->start());
-}
-
-Node* ChangeLowering::LoadMapInstanceType(Node* map) {
-  return graph()->NewNode(
-      machine()->Load(MachineType::Uint8()), map,
-      jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
-      graph()->start(), graph()->start());
-}
-
-Reduction ChangeLowering::ObjectIsNumber(Node* node) {
-  Node* input = NodeProperties::GetValueInput(node, 0);
-  // TODO(bmeurer): Optimize somewhat based on input type.
-  Node* check = IsSmi(input);
-  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->Int32Constant(1);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(
-      machine()->WordEqual(), LoadHeapObjectMap(input, if_false),
-      jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
-  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  node->ReplaceInput(0, vtrue);
-  node->AppendInput(graph()->zone(), vfalse);
-  node->AppendInput(graph()->zone(), control);
-  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
-  return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsReceiver(Node* node) {
-  Node* input = NodeProperties::GetValueInput(node, 0);
-  // TODO(bmeurer): Optimize somewhat based on input type.
-  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-  Node* check = IsSmi(input);
-  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->Int32Constant(0);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse =
-      graph()->NewNode(machine()->Uint32LessThanOrEqual(),
-                       jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
-                       LoadMapInstanceType(LoadHeapObjectMap(input, if_false)));
-  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  node->ReplaceInput(0, vtrue);
-  node->AppendInput(graph()->zone(), vfalse);
-  node->AppendInput(graph()->zone(), control);
-  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
-  return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsUndetectable(Node* node) {
-  Node* input = NodeProperties::GetValueInput(node, 0);
-  // TODO(bmeurer): Optimize somewhat based on input type.
-  Node* check = IsSmi(input);
-  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->Int32Constant(0);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(
-      machine()->Word32Equal(),
-      graph()->NewNode(
-          machine()->Word32Equal(),
-          graph()->NewNode(machine()->Word32And(),
-                           jsgraph()->Uint32Constant(1 << Map::kIsUndetectable),
-                           LoadMapBitField(LoadHeapObjectMap(input, if_false))),
-          jsgraph()->Int32Constant(0)),
-      jsgraph()->Int32Constant(0));
-  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  node->ReplaceInput(0, vtrue);
-  node->AppendInput(graph()->zone(), vfalse);
-  node->AppendInput(graph()->zone(), control);
-  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
-  return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsSmi(Node* node) {
-  node->ReplaceInput(0,
-                     graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
-                                      jsgraph()->IntPtrConstant(kSmiTagMask)));
-  node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
-  NodeProperties::ChangeOp(node, machine()->WordEqual());
-  return Changed(node);
-}
-
-Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
-
-
-Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }
-
-
-CommonOperatorBuilder* ChangeLowering::common() const {
-  return jsgraph()->common();
-}
-
-
-MachineOperatorBuilder* ChangeLowering::machine() const {
-  return jsgraph()->machine();
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
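
change-lowering.cc is deleted wholesale above; its replacement is outside this hunk. For reference, the smi-tagging arithmetic it implemented is a plain shift by kSmiShiftSize + kSmiTagSize (with the overflow-checked Int32AddWithOverflow(value, value) path on 32-bit targets, where the shift amount is 1). A standalone sketch of the 64-bit case, with the usual constants assumed rather than taken from this diff:

    #include <cassert>
    #include <cstdint>

    // Assumed 64-bit configuration: kSmiShiftSize == 31, kSmiTagSize == 1,
    // so a smi keeps its 32-bit payload in the upper half of the word and
    // the tag bit (LSB) clear. (Assumes a 64-bit host so the shifts below
    // are well defined.)
    constexpr int kSmiShiftBits = 31 + 1;

    intptr_t ChangeInt32ToSmi(int32_t value) {
      // Mirrors the deleted ChangeInt32ToSmi: widen, then WordShl.
      return static_cast<intptr_t>(
          static_cast<uintptr_t>(static_cast<intptr_t>(value)) << kSmiShiftBits);
    }

    int32_t ChangeSmiToInt32(intptr_t smi) {
      // Mirrors the deleted ChangeSmiToInt32: WordSar, then truncate.
      return static_cast<int32_t>(smi >> kSmiShiftBits);
    }

    int main() {
      assert(ChangeSmiToInt32(ChangeInt32ToSmi(-7)) == -7);
      assert((ChangeInt32ToSmi(1) & 1) == 0);  // tag bit stays clear
    }
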
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
deleted file mode 100644
index 7e5078b..0000000
--- a/src/compiler/change-lowering.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CHANGE_LOWERING_H_
-#define V8_COMPILER_CHANGE_LOWERING_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-struct ElementAccess;
-class JSGraph;
-class Linkage;
-class MachineOperatorBuilder;
-class Operator;
-
-class ChangeLowering final : public Reducer {
- public:
-  explicit ChangeLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
-  ~ChangeLowering() final;
-
-  Reduction Reduce(Node* node) final;
-
- private:
-  Node* HeapNumberValueIndexConstant();
-  Node* SmiMaxValueConstant();
-  Node* SmiShiftBitsConstant();
-
-  Node* AllocateHeapNumberWithValue(Node* value, Node* control);
-  Node* ChangeInt32ToFloat64(Node* value);
-  Node* ChangeInt32ToSmi(Node* value);
-  Node* ChangeSmiToFloat64(Node* value);
-  Node* ChangeSmiToInt32(Node* value);
-  Node* ChangeUint32ToFloat64(Node* value);
-  Node* ChangeUint32ToSmi(Node* value);
-  Node* LoadHeapNumberValue(Node* value, Node* control);
-  Node* TestNotSmi(Node* value);
-
-  Reduction ChangeBitToBool(Node* value, Node* control);
-  Reduction ChangeBoolToBit(Node* value);
-  Reduction ChangeFloat64ToTagged(Node* value, Node* control);
-  Reduction ChangeInt32ToTagged(Node* value, Node* control);
-  Reduction ChangeTaggedToFloat64(Node* value, Node* control);
-  Reduction ChangeTaggedToUI32(Node* value, Node* control,
-                               Signedness signedness);
-  Reduction ChangeUint32ToTagged(Node* value, Node* control);
-
-  Reduction LoadField(Node* node);
-  Reduction StoreField(Node* node);
-  Reduction LoadElement(Node* node);
-  Reduction StoreElement(Node* node);
-  Reduction Allocate(Node* node);
-
-  Node* IsSmi(Node* value);
-  Node* LoadHeapObjectMap(Node* object, Node* control);
-  Node* LoadMapBitField(Node* map);
-  Node* LoadMapInstanceType(Node* map);
-
-  Reduction ObjectIsNumber(Node* node);
-  Reduction ObjectIsReceiver(Node* node);
-  Reduction ObjectIsSmi(Node* node);
-  Reduction ObjectIsUndetectable(Node* node);
-
-  Node* ComputeIndex(const ElementAccess& access, Node* const key);
-  Graph* graph() const;
-  Isolate* isolate() const;
-  JSGraph* jsgraph() const { return jsgraph_; }
-  CommonOperatorBuilder* common() const;
-  MachineOperatorBuilder* machine() const;
-
-  JSGraph* const jsgraph_;
-  SetOncePointer<const Operator> allocate_heap_number_operator_;
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_CHANGE_LOWERING_H_
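
The deleted ComputeWriteBarrierKind (in the .cc file above) encoded the standard elision rules for generational-GC stores: map-offset stores take the cheaper map barrier, smis and the root-set oddballs need none, known heap pointers skip the smi check, and other tagged stores into a tagged base get the full barrier. A standalone sketch of that decision ladder with the Type* queries reduced to booleans (not V8 code; the real version also elides barriers for immortal immovable roots):

    #include <cassert>

    enum class Barrier { kNone, kMap, kPointer, kFull };

    Barrier ComputeBarrier(bool tagged_base, bool storing_at_map_offset,
                           bool value_is_smi, bool value_is_oddball,
                           bool value_is_heap_pointer) {
      if (tagged_base && storing_at_map_offset) {
        return Barrier::kMap;  // map stores use the cheaper barrier
      }
      if (value_is_smi || value_is_oddball) {
        return Barrier::kNone;  // smis and root-set oddballs never need one
      }
      if (!tagged_base) {
        return Barrier::kNone;  // barriers only guard stores into heap objects
      }
      if (value_is_heap_pointer) {
        return Barrier::kPointer;  // known heap object: no smi check needed
      }
      return Barrier::kFull;
    }

    int main() {
      assert(ComputeBarrier(true, true, false, false, false) == Barrier::kMap);
      assert(ComputeBarrier(true, false, true, false, false) == Barrier::kNone);
      assert(ComputeBarrier(true, false, false, false, true) == Barrier::kPointer);
      assert(ComputeBarrier(true, false, false, false, false) == Barrier::kFull);
    }
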
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
new file mode 100644
index 0000000..081f28b
--- /dev/null
+++ b/src/compiler/code-assembler.cc
@@ -0,0 +1,737 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
+                             const CallInterfaceDescriptor& descriptor,
+                             Code::Flags flags, const char* name,
+                             size_t result_size)
+    : CodeAssembler(
+          isolate, zone,
+          Linkage::GetStubCallDescriptor(
+              isolate, zone, descriptor, descriptor.GetStackParameterCount(),
+              CallDescriptor::kNoFlags, Operator::kNoProperties,
+              MachineType::AnyTagged(), result_size),
+          flags, name) {}
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+                             Code::Flags flags, const char* name)
+    : CodeAssembler(isolate, zone,
+                    Linkage::GetJSCallDescriptor(zone, false, parameter_count,
+                                                 CallDescriptor::kNoFlags),
+                    flags, name) {}
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
+                             CallDescriptor* call_descriptor, Code::Flags flags,
+                             const char* name)
+    : raw_assembler_(new RawMachineAssembler(
+          isolate, new (zone) Graph(zone), call_descriptor,
+          MachineType::PointerRepresentation(),
+          InstructionSelector::SupportedMachineOperatorFlags())),
+      flags_(flags),
+      name_(name),
+      code_generated_(false),
+      variables_(zone) {}
+
+CodeAssembler::~CodeAssembler() {}
+
+void CodeAssembler::CallPrologue() {}
+
+void CodeAssembler::CallEpilogue() {}
+
+Handle<Code> CodeAssembler::GenerateCode() {
+  DCHECK(!code_generated_);
+
+  Schedule* schedule = raw_assembler_->Export();
+  Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
+      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
+      name_);
+
+  code_generated_ = true;
+  return code;
+}
+
+bool CodeAssembler::Is64() const { return raw_assembler_->machine()->Is64(); }
+
+bool CodeAssembler::IsFloat64RoundUpSupported() const {
+  return raw_assembler_->machine()->Float64RoundUp().IsSupported();
+}
+
+bool CodeAssembler::IsFloat64RoundDownSupported() const {
+  return raw_assembler_->machine()->Float64RoundDown().IsSupported();
+}
+
+bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
+  return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
+}
+
+Node* CodeAssembler::Int32Constant(int32_t value) {
+  return raw_assembler_->Int32Constant(value);
+}
+
+Node* CodeAssembler::Int64Constant(int64_t value) {
+  return raw_assembler_->Int64Constant(value);
+}
+
+Node* CodeAssembler::IntPtrConstant(intptr_t value) {
+  return raw_assembler_->IntPtrConstant(value);
+}
+
+Node* CodeAssembler::NumberConstant(double value) {
+  return raw_assembler_->NumberConstant(value);
+}
+
+Node* CodeAssembler::SmiConstant(Smi* value) {
+  return IntPtrConstant(bit_cast<intptr_t>(value));
+}
+
+Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
+  return raw_assembler_->HeapConstant(object);
+}
+
+Node* CodeAssembler::BooleanConstant(bool value) {
+  return raw_assembler_->BooleanConstant(value);
+}
+
+Node* CodeAssembler::ExternalConstant(ExternalReference address) {
+  return raw_assembler_->ExternalConstant(address);
+}
+
+Node* CodeAssembler::Float64Constant(double value) {
+  return raw_assembler_->Float64Constant(value);
+}
+
+Node* CodeAssembler::NaNConstant() {
+  return LoadRoot(Heap::kNanValueRootIndex);
+}
+
+bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
+  Int64Matcher m(node);
+  if (m.HasValue() &&
+      m.IsInRange(std::numeric_limits<int32_t>::min(),
+                  std::numeric_limits<int32_t>::max())) {
+    out_value = static_cast<int32_t>(m.Value());
+    return true;
+  }
+
+  return false;
+}
+
+bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
+  Int64Matcher m(node);
+  if (m.HasValue()) out_value = m.Value();
+  return m.HasValue();
+}
+
+bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
+  IntPtrMatcher m(node);
+  if (m.HasValue()) out_value = m.Value();
+  return m.HasValue();
+}
+
+Node* CodeAssembler::Parameter(int value) {
+  return raw_assembler_->Parameter(value);
+}
+
+void CodeAssembler::Return(Node* value) {
+  return raw_assembler_->Return(value);
+}
+
+void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
+
+Node* CodeAssembler::LoadFramePointer() {
+  return raw_assembler_->LoadFramePointer();
+}
+
+Node* CodeAssembler::LoadParentFramePointer() {
+  return raw_assembler_->LoadParentFramePointer();
+}
+
+Node* CodeAssembler::LoadStackPointer() {
+  return raw_assembler_->LoadStackPointer();
+}
+
+Node* CodeAssembler::SmiShiftBitsConstant() {
+  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
+#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name)   \
+  Node* CodeAssembler::name(Node* a, Node* b) { \
+    return raw_assembler_->name(a, b);          \
+  }
+CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
+#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
+
+Node* CodeAssembler::WordShl(Node* value, int shift) {
+  return raw_assembler_->WordShl(value, IntPtrConstant(shift));
+}
+
+Node* CodeAssembler::WordShr(Node* value, int shift) {
+  return raw_assembler_->WordShr(value, IntPtrConstant(shift));
+}
+
+Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
+  if (raw_assembler_->machine()->Is64()) {
+    value = raw_assembler_->ChangeUint32ToUint64(value);
+  }
+  return value;
+}
+
+Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
+  if (raw_assembler_->machine()->Is64()) {
+    value = raw_assembler_->ChangeInt32ToInt64(value);
+  }
+  return value;
+}
+
+#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
+  Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
+CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
+#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
+
+Node* CodeAssembler::Load(MachineType rep, Node* base) {
+  return raw_assembler_->Load(rep, base);
+}
+
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
+  return raw_assembler_->Load(rep, base, index);
+}
+
+Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
+  return raw_assembler_->AtomicLoad(rep, base, index);
+}
+
+Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
+  if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
+    Handle<Object> root = isolate()->heap()->root_handle(root_index);
+    if (root->IsSmi()) {
+      return SmiConstant(Smi::cast(*root));
+    } else {
+      return HeapConstant(Handle<HeapObject>::cast(root));
+    }
+  }
+
+  Node* roots_array_start =
+      ExternalConstant(ExternalReference::roots_array_start(isolate()));
+  return Load(MachineType::AnyTagged(), roots_array_start,
+              IntPtrConstant(root_index * kPointerSize));
+}
+
+Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
+  return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
+                           Node* value) {
+  return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+                                         Node* value) {
+  return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+}
+
+Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+                                         Node* index, Node* value) {
+  return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+}
+
+Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
+                                 Node* index, Node* value) {
+  return raw_assembler_->AtomicStore(rep, base, index, value);
+}
+
+Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
+  DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
+  Node* roots_array_start =
+      ExternalConstant(ExternalReference::roots_array_start(isolate()));
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged, roots_array_start,
+                             IntPtrConstant(root_index * kPointerSize), value);
+}
+
+Node* CodeAssembler::Projection(int index, Node* value) {
+  return raw_assembler_->Projection(index, value);
+}
+
+void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
+  Label if_condition_is_true(this), if_condition_is_false(this);
+  Branch(condition, &if_condition_is_true, &if_condition_is_false);
+  Bind(&if_condition_is_true);
+  Goto(if_true);
+  Bind(&if_condition_is_false);
+  Goto(if_false);
+}
+
+Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+                           Node** args) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
+                               Node** args) {
+  return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
+                                 Node* context) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+                                 Node* arg1) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+                                 Node* arg1, Node* arg2) {
+  CallPrologue();
+  Node* return_value =
+      raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+                                 Node* arg1, Node* arg2, Node* arg3) {
+  CallPrologue();
+  Node* return_value =
+      raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+                                 Node* arg1, Node* arg2, Node* arg3,
+                                 Node* arg4) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
+                                                    arg3, arg4, context);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                     Node* context) {
+  return raw_assembler_->TailCallRuntime0(function_id, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1) {
+  return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2) {
+  return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3) {
+  return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
+                                          context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3, Node* arg4) {
+  return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
+                                          context);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+                              Node* arg1, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), target, context, arg1, result_size);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+                              Node* arg1, Node* arg2, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), target, context, arg1, arg2,
+                  result_size);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+                              Node* arg1, Node* arg2, Node* arg3,
+                              size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+                  result_size);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, Node* arg1,
+                              size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
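+  // By convention, the context is passed as the trailing argument of a stub
+  // call.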
+  Node** args = zone()->NewArray<Node*>(2);
+  args[0] = arg1;
+  args[1] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, Node* arg1,
+                              Node* arg2, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(3);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, Node* arg1,
+                              Node* arg2, Node* arg3, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(4);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, Node* arg1,
+                              Node* arg2, Node* arg3, Node* arg4,
+                              size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(5);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                              Node* target, Node* context, Node* arg1,
+                              Node* arg2, Node* arg3, Node* arg4, Node* arg5,
+                              size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(6);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = arg5;
+  args[5] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+                                  Node* arg1, Node* arg2, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
+                      result_size);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+                                  Node* arg1, Node* arg2, Node* arg3,
+                                  size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+                      result_size);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(3);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(4);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallBytecodeDispatch(
+    const CallInterfaceDescriptor& interface_descriptor,
+    Node* code_target_address, Node** args) {
+  CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
+      isolate(), zone(), interface_descriptor,
+      interface_descriptor.GetStackParameterCount());
+  return raw_assembler_->TailCallN(descriptor, code_target_address, args);
+}
+
+void CodeAssembler::Goto(CodeAssembler::Label* label) {
+  label->MergeVariables();
+  raw_assembler_->Goto(label->label_);
+}
+
+void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
+  Label false_label(this);
+  Branch(condition, true_label, &false_label);
+  Bind(&false_label);
+}
+
+void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
+  Label true_label(this);
+  Branch(condition, &true_label, false_label);
+  Bind(&true_label);
+}
+
+void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
+                           CodeAssembler::Label* false_label) {
+  true_label->MergeVariables();
+  false_label->MergeVariables();
+  return raw_assembler_->Branch(condition, true_label->label_,
+                                false_label->label_);
+}
+
+void CodeAssembler::Switch(Node* index, Label* default_label,
+                           int32_t* case_values, Label** case_labels,
+                           size_t case_count) {
+  RawMachineLabel** labels =
+      new (zone()->New(sizeof(RawMachineLabel*) * case_count))
+          RawMachineLabel*[case_count];
+  for (size_t i = 0; i < case_count; ++i) {
+    labels[i] = case_labels[i]->label_;
+    case_labels[i]->MergeVariables();
+  }
+  // The switch has a single control edge to the default label, so merge its
+  // variables only once, not once per case.
+  default_label->MergeVariables();
+  return raw_assembler_->Switch(index, default_label->label_, case_values,
+                                labels, case_count);
+}
+
+// RawMachineAssembler delegate helpers:
+Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
+
+Factory* CodeAssembler::factory() const { return isolate()->factory(); }
+
+Graph* CodeAssembler::graph() const { return raw_assembler_->graph(); }
+
+Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
+
+// The core implementation of Variable is stored through an indirection so
+// that it can outlive the often block-scoped Variable declarations. This is
+// needed to ensure that variable binding and merging through phis can
+// properly be verified.
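+//
+// For example (an illustrative sketch, not part of this patch), a Variable
+// declared in an inner scope can still be merged at a label bound later,
+// because only the block-scoped handle dies while its Impl lives on:
+//
+//   CodeAssembler::Label done(assembler);
+//   {
+//     CodeAssembler::Variable var(assembler, MachineRepresentation::kTagged);
+//     var.Bind(assembler->BooleanConstant(true));
+//     assembler->Goto(&done);
+//   }  // |var| goes out of scope here; its Impl remains zone-allocated.
+//   assembler->Bind(&done);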
+class CodeAssembler::Variable::Impl : public ZoneObject {
+ public:
+  explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
+  Node* value_;
+  MachineRepresentation rep_;
+};
+
+CodeAssembler::Variable::Variable(CodeAssembler* assembler,
+                                  MachineRepresentation rep)
+    : impl_(new (assembler->zone()) Impl(rep)) {
+  assembler->variables_.push_back(impl_);
+}
+
+void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeAssembler::Variable::value() const {
+  DCHECK_NOT_NULL(impl_->value_);
+  return impl_->value_;
+}
+
+MachineRepresentation CodeAssembler::Variable::rep() const {
+  return impl_->rep_;
+}
+
+bool CodeAssembler::Variable::IsBound() const {
+  return impl_->value_ != nullptr;
+}
+
+CodeAssembler::Label::Label(CodeAssembler* assembler, int merged_value_count,
+                            CodeAssembler::Variable** merged_variables,
+                            CodeAssembler::Label::Type type)
+    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+  label_ = new (buffer)
+      RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
+                                        : RawMachineLabel::kNonDeferred);
+  for (int i = 0; i < merged_value_count; ++i) {
+    variable_phis_[merged_variables[i]->impl_] = nullptr;
+  }
+}
+
+void CodeAssembler::Label::MergeVariables() {
+  ++merge_count_;
+  for (auto var : assembler_->variables_) {
+    size_t count = 0;
+    Node* node = var->value_;
+    if (node != nullptr) {
+      auto i = variable_merges_.find(var);
+      if (i != variable_merges_.end()) {
+        i->second.push_back(node);
+        count = i->second.size();
+      } else {
+        count = 1;
+        variable_merges_[var] = std::vector<Node*>(1, node);
+      }
+    }
+    // If the following assert fires, then this label has been jumped to along
+    // a path that leaves the variable unbound, even though the label expects
+    // to merge the variable's value into a phi.
+    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
+           count == merge_count_);
+    USE(count);
+
+    // If the label is already bound, we already know the set of variables to
+    // merge and phi nodes have already been created.
+    if (bound_) {
+      auto phi = variable_phis_.find(var);
+      if (phi != variable_phis_.end()) {
+        DCHECK_NOT_NULL(phi->second);
+        assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+      } else {
+        auto i = variable_merges_.find(var);
+        if (i != variable_merges_.end()) {
+          // If the following assert fires, then a variable had the same bound
+          // value along all paths up to the point this label was bound, but a
+          // path merged afterwards carries a new value for it. Phis cannot be
+          // added to a label after it has been bound; instead, list the
+          // variable in the label constructor's list of merged variables.
+          DCHECK(std::find_if(i->second.begin(), i->second.end(),
+                              [node](Node* e) -> bool { return node != e; }) ==
+                 i->second.end());
+        }
+      }
+    }
+  }
+}
+
+void CodeAssembler::Label::Bind() {
+  DCHECK(!bound_);
+  assembler_->raw_assembler_->Bind(label_);
+
+  // Make sure that all variables that have changed along any path up to this
+  // point are marked as merge variables.
+  for (auto var : assembler_->variables_) {
+    Node* shared_value = nullptr;
+    auto i = variable_merges_.find(var);
+    if (i != variable_merges_.end()) {
+      for (auto value : i->second) {
+        DCHECK(value != nullptr);
+        if (value != shared_value) {
+          if (shared_value == nullptr) {
+            shared_value = value;
+          } else {
+            variable_phis_[var] = nullptr;
+          }
+        }
+      }
+    }
+  }
+
+  for (auto var : variable_phis_) {
+    CodeAssembler::Variable::Impl* var_impl = var.first;
+    auto i = variable_merges_.find(var_impl);
+    // If the following assert fires, then a variable that has been marked as
+    // being merged at the label--either by explicitly marking it so in the
+    // label constructor or by having seen different bound values at branches
+    // into the label--doesn't have a bound value along all of the paths that
+    // have been merged into the label up to this point.
+    DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
+    Node* phi = assembler_->raw_assembler_->Phi(
+        var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
+    variable_phis_[var_impl] = phi;
+  }
+
+  // Bind each variable to its merge phi, to the common value along all paths,
+  // or to nullptr if neither exists.
+  for (auto var : assembler_->variables_) {
+    auto i = variable_phis_.find(var);
+    if (i != variable_phis_.end()) {
+      var->value_ = i->second;
+    } else {
+      auto j = variable_merges_.find(var);
+      if (j != variable_merges_.end() && j->second.size() == merge_count_) {
+        var->value_ = j->second.back();
+      } else {
+        var->value_ = nullptr;
+      }
+    }
+  }
+
+  bound_ = true;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
new file mode 100644
index 0000000..39af56d
--- /dev/null
+++ b/src/compiler/code-assembler.h
@@ -0,0 +1,408 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_ASSEMBLER_H_
+#define V8_COMPILER_CODE_ASSEMBLER_H_
+
+#include <map>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/builtins.h"
+#include "src/heap/heap.h"
+#include "src/machine-type.h"
+#include "src/runtime/runtime.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Callable;
+class CallInterfaceDescriptor;
+class Isolate;
+class Factory;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Graph;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class RawMachineLabel;
+class Schedule;
+
+#define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+  V(Float32Equal)                                \
+  V(Float32LessThan)                             \
+  V(Float32LessThanOrEqual)                      \
+  V(Float32GreaterThan)                          \
+  V(Float32GreaterThanOrEqual)                   \
+  V(Float64Equal)                                \
+  V(Float64LessThan)                             \
+  V(Float64LessThanOrEqual)                      \
+  V(Float64GreaterThan)                          \
+  V(Float64GreaterThanOrEqual)                   \
+  V(Int32GreaterThan)                            \
+  V(Int32GreaterThanOrEqual)                     \
+  V(Int32LessThan)                               \
+  V(Int32LessThanOrEqual)                        \
+  V(IntPtrLessThan)                              \
+  V(IntPtrLessThanOrEqual)                       \
+  V(IntPtrGreaterThan)                           \
+  V(IntPtrGreaterThanOrEqual)                    \
+  V(IntPtrEqual)                                 \
+  V(Uint32LessThan)                              \
+  V(UintPtrLessThan)                             \
+  V(UintPtrGreaterThanOrEqual)                   \
+  V(WordEqual)                                   \
+  V(WordNotEqual)                                \
+  V(Word32Equal)                                 \
+  V(Word32NotEqual)                              \
+  V(Word64Equal)                                 \
+  V(Word64NotEqual)
+
+#define CODE_ASSEMBLER_BINARY_OP_LIST(V)   \
+  CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+  V(Float64Add)                            \
+  V(Float64Sub)                            \
+  V(Float64Mul)                            \
+  V(Float64Div)                            \
+  V(Float64Mod)                            \
+  V(Float64InsertLowWord32)                \
+  V(Float64InsertHighWord32)               \
+  V(IntPtrAdd)                             \
+  V(IntPtrAddWithOverflow)                 \
+  V(IntPtrSub)                             \
+  V(IntPtrSubWithOverflow)                 \
+  V(IntPtrMul)                             \
+  V(Int32Add)                              \
+  V(Int32AddWithOverflow)                  \
+  V(Int32Sub)                              \
+  V(Int32Mul)                              \
+  V(Int32Div)                              \
+  V(WordOr)                                \
+  V(WordAnd)                               \
+  V(WordXor)                               \
+  V(WordShl)                               \
+  V(WordShr)                               \
+  V(WordSar)                               \
+  V(WordRor)                               \
+  V(Word32Or)                              \
+  V(Word32And)                             \
+  V(Word32Xor)                             \
+  V(Word32Shl)                             \
+  V(Word32Shr)                             \
+  V(Word32Sar)                             \
+  V(Word32Ror)                             \
+  V(Word64Or)                              \
+  V(Word64And)                             \
+  V(Word64Xor)                             \
+  V(Word64Shr)                             \
+  V(Word64Sar)                             \
+  V(Word64Ror)
+
+#define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
+  V(Float64Neg)                         \
+  V(Float64Sqrt)                        \
+  V(Float64ExtractLowWord32)            \
+  V(Float64ExtractHighWord32)           \
+  V(BitcastWordToTagged)                \
+  V(TruncateFloat64ToWord32)            \
+  V(TruncateInt64ToInt32)               \
+  V(ChangeFloat64ToUint32)              \
+  V(ChangeInt32ToFloat64)               \
+  V(ChangeInt32ToInt64)                 \
+  V(ChangeUint32ToFloat64)              \
+  V(ChangeUint32ToUint64)               \
+  V(RoundFloat64ToInt32)                \
+  V(Float64RoundDown)                   \
+  V(Float64RoundUp)                     \
+  V(Float64RoundTruncate)               \
+  V(Word32Clz)
+
+// A "public" interface used by components outside of compiler directory to
+// create code objects with TurboFan's backend. This class is mostly a thin shim
+// around the RawMachineAssembler, and its primary job is to ensure that the
+// innards of the RawMachineAssembler and other compiler implementation details
+// don't leak outside of the the compiler directory..
+//
+// V8 components that need to generate low-level code using this interface
+// should include this header--and this header only--from the compiler directory
+// (this is actually enforced). Since all interesting data structures are
+// forward declared, it's not possible for clients to peek inside the compiler
+// internals.
+//
+// In addition to providing isolation between TurboFan and code generation
+// clients, CodeAssembler also provides an abstraction for creating variables
+// and enhanced Label functionality to merge variable values along paths where
+// they have differing values, including loops.
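+//
+// A minimal usage sketch (illustrative only; |isolate|, |zone|, |descriptor|
+// and |flags| are assumed to be provided by the caller):
+//
+//   CodeAssembler assembler(isolate, zone, descriptor, flags, "IsTrue");
+//   CodeAssembler::Label if_true(&assembler), if_false(&assembler);
+//   assembler.Branch(assembler.WordEqual(assembler.Parameter(0),
+//                                        assembler.BooleanConstant(true)),
+//                    &if_true, &if_false);
+//   assembler.Bind(&if_true);
+//   assembler.Return(assembler.BooleanConstant(true));
+//   assembler.Bind(&if_false);
+//   assembler.Return(assembler.BooleanConstant(false));
+//   Handle<Code> code = assembler.GenerateCode();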
+class CodeAssembler {
+ public:
+  // Create with CallStub linkage.
+  // |result_size| specifies the number of results returned by the stub.
+  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+  CodeAssembler(Isolate* isolate, Zone* zone,
+                const CallInterfaceDescriptor& descriptor, Code::Flags flags,
+                const char* name, size_t result_size = 1);
+
+  // Create with JSCall linkage.
+  CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+                Code::Flags flags, const char* name);
+
+  virtual ~CodeAssembler();
+
+  Handle<Code> GenerateCode();
+
+  bool Is64() const;
+  bool IsFloat64RoundUpSupported() const;
+  bool IsFloat64RoundDownSupported() const;
+  bool IsFloat64RoundTruncateSupported() const;
+
+  class Label;
+  class Variable {
+   public:
+    explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
+    void Bind(Node* value);
+    Node* value() const;
+    MachineRepresentation rep() const;
+    bool IsBound() const;
+
+   private:
+    friend class CodeAssembler;
+    class Impl;
+    Impl* impl_;
+  };
+
+  enum AllocationFlag : uint8_t {
+    kNone = 0,
+    kDoubleAlignment = 1,
+    kPretenured = 1 << 1
+  };
+
+  typedef base::Flags<AllocationFlag> AllocationFlags;
+
+  // ===========================================================================
+  // Base Assembler
+  // ===========================================================================
+
+  // Constants.
+  Node* Int32Constant(int32_t value);
+  Node* Int64Constant(int64_t value);
+  Node* IntPtrConstant(intptr_t value);
+  Node* NumberConstant(double value);
+  Node* SmiConstant(Smi* value);
+  Node* HeapConstant(Handle<HeapObject> object);
+  Node* BooleanConstant(bool value);
+  Node* ExternalConstant(ExternalReference address);
+  Node* Float64Constant(double value);
+  Node* NaNConstant();
+
+  bool ToInt32Constant(Node* node, int32_t& out_value);
+  bool ToInt64Constant(Node* node, int64_t& out_value);
+  bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+
+  Node* Parameter(int value);
+  void Return(Node* value);
+
+  void Bind(Label* label);
+  void Goto(Label* label);
+  void GotoIf(Node* condition, Label* true_label);
+  void GotoUnless(Node* condition, Label* false_label);
+  void Branch(Node* condition, Label* true_label, Label* false_label);
+
+  void Switch(Node* index, Label* default_label, int32_t* case_values,
+              Label** case_labels, size_t case_count);
+
+  // Access to the frame pointer
+  Node* LoadFramePointer();
+  Node* LoadParentFramePointer();
+
+  // Access to the stack pointer
+  Node* LoadStackPointer();
+
+  // Load raw memory location.
+  Node* Load(MachineType rep, Node* base);
+  Node* Load(MachineType rep, Node* base, Node* index);
+  Node* AtomicLoad(MachineType rep, Node* base, Node* index);
+
+  // Load a value from the root array.
+  Node* LoadRoot(Heap::RootListIndex root_index);
+
+  // Store value to raw memory location.
+  Node* Store(MachineRepresentation rep, Node* base, Node* value);
+  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+                            Node* value);
+  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+                    Node* value);
+
+  // Store a value to the root array.
+  Node* StoreRoot(Heap::RootListIndex root_index, Node* value);
+
+// Basic arithmetic operations.
+#define DECLARE_CODE_ASSEMBLER_BINARY_OP(name) Node* name(Node* a, Node* b);
+  CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
+#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
+
+  Node* WordShl(Node* value, int shift);
+  Node* WordShr(Node* value, int shift);
+
+// Basic unary operations.
+#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name) Node* name(Node* a);
+  CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
+#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
+
+  // No-op on 32-bit, otherwise zero extend.
+  Node* ChangeUint32ToWord(Node* value);
+  // No-op on 32-bit, otherwise sign extend.
+  Node* ChangeInt32ToIntPtr(Node* value);
+
+  // Projections
+  Node* Projection(int index, Node* value);
+
+  // Calls
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3, Node* arg4);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+
+  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+                 size_t result_size = 1);
+  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+                 Node* arg2, size_t result_size = 1);
+  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+                 Node* arg2, Node* arg3, size_t result_size = 1);
+
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3,
+                 size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                 size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                 Node* arg5, size_t result_size = 1);
+
+  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+                     Node* arg2, size_t result_size = 1);
+  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+                     Node* arg2, Node* arg3, size_t result_size = 1);
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, Node* arg2,
+                     size_t result_size = 1);
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, Node* arg2, Node* arg3,
+                     size_t result_size = 1);
+
+  Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
+                                 Node* code_target_address, Node** args);
+
+  // Branching helpers.
+  void BranchIf(Node* condition, Label* if_true, Label* if_false);
+
+#define BRANCH_HELPER(name)                                                \
+  void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
+    BranchIf(name(a, b), if_true, if_false);                               \
+  }
+  CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
+#undef BRANCH_HELPER
+
+  // Helpers which delegate to RawMachineAssembler.
+  Factory* factory() const;
+  Isolate* isolate() const;
+  Zone* zone() const;
+
+ protected:
+  // Protected helpers which delegate to RawMachineAssembler.
+  Graph* graph() const;
+
+  Node* SmiShiftBitsConstant();
+
+  // Enables subclasses to perform operations before and after a call.
+  virtual void CallPrologue();
+  virtual void CallEpilogue();
+
+ private:
+  friend class CodeAssemblerTester;
+
+  CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
+                Code::Flags flags, const char* name);
+
+  Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+  Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+
+  base::SmartPointer<RawMachineAssembler> raw_assembler_;
+  Code::Flags flags_;
+  const char* name_;
+  bool code_generated_;
+  ZoneVector<Variable::Impl*> variables_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(CodeAssembler::AllocationFlags);
+
+class CodeAssembler::Label {
+ public:
+  enum Type { kDeferred, kNonDeferred };
+
+  explicit Label(
+      CodeAssembler* assembler,
+      CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+      : CodeAssembler::Label(assembler, 0, nullptr, type) {}
+  Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
+        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+      : CodeAssembler::Label(assembler, 1, &merged_variable, type) {}
+  Label(CodeAssembler* assembler, int merged_variable_count,
+        CodeAssembler::Variable** merged_variables,
+        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
+  ~Label() {}
+
+ private:
+  friend class CodeAssembler;
+
+  void Bind();
+  void MergeVariables();
+
+  bool bound_;
+  size_t merge_count_;
+  CodeAssembler* assembler_;
+  RawMachineLabel* label_;
+  // Map of variables that need to be merged to their phi nodes (or placeholders
+  // for those phis).
+  std::map<Variable::Impl*, Node*> variable_phis_;
+  // Map of variables to the list of value nodes that have been added from each
+  // merge path in their order of merging.
+  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+};
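+
+// A sketch of merging a variable through a label (illustrative only; |node_a|,
+// |node_b| and |condition| are assumed to be Node* values produced earlier):
+//
+//   CodeAssembler::Variable var(assembler, MachineRepresentation::kTagged);
+//   CodeAssembler::Label done(assembler, &var);  // |var| is merged via a phi.
+//   var.Bind(node_a);
+//   assembler->GotoIf(condition, &done);  // First merge: |var| == node_a.
+//   var.Bind(node_b);
+//   assembler->Goto(&done);               // Second merge: |var| == node_b.
+//   assembler->Bind(&done);               // var.value() is now Phi(a, b).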
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CODE_ASSEMBLER_H_
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 7de32c5..adb8400 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -43,6 +43,10 @@
     return ToConstant(instr_->InputAt(index)).ToInt32();
   }
 
+  uint32_t InputUint32(size_t index) {
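+    // Reinterpret the bits of the 32-bit input constant as unsigned.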
+    return bit_cast<uint32_t>(InputInt32(index));
+  }
+
   int64_t InputInt64(size_t index) {
     return ToConstant(instr_->InputAt(index)).ToInt64();
   }
@@ -127,7 +131,7 @@
     return ToConstant(op).ToHeapObject();
   }
 
-  Frame* frame() const { return gen_->frame(); }
+  const Frame* frame() const { return gen_->frame(); }
   FrameAccessState* frame_access_state() const {
     return gen_->frame_access_state();
   }
@@ -163,7 +167,7 @@
 
   Label* entry() { return &entry_; }
   Label* exit() { return &exit_; }
-  Frame* frame() const { return frame_; }
+  const Frame* frame() const { return frame_; }
   Isolate* isolate() const { return masm()->isolate(); }
   MacroAssembler* masm() const { return masm_; }
   OutOfLineCode* next() const { return next_; }
@@ -171,7 +175,7 @@
  private:
   Label entry_;
   Label exit_;
-  Frame* const frame_;
+  const Frame* const frame_;
   MacroAssembler* const masm_;
   OutOfLineCode* const next_;
 };
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 086da56..5cf9d97 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -33,7 +33,7 @@
 
 CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
                              InstructionSequence* code, CompilationInfo* info)
-    : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
+    : frame_access_state_(nullptr),
       linkage_(linkage),
       code_(code),
       info_(info),
@@ -56,6 +56,12 @@
   for (int i = 0; i < code->InstructionBlockCount(); ++i) {
     new (&labels_[i]) Label;
   }
+  CreateFrameAccessState(frame);
+}
+
+void CodeGenerator::CreateFrameAccessState(Frame* frame) {
+  FinishFrame(frame);
+  frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
 }
 
 Handle<Code> CodeGenerator::GenerateCode() {
@@ -96,9 +102,6 @@
     }
   }
 
-  // Finish the Frame
-  frame()->AlignFrame(kFrameAlignmentInBytes);
-  AssembleSetupStackPointer();
   // Assemble all non-deferred blocks, followed by deferred ones.
   for (int deferred = 0; deferred < 2; ++deferred) {
     for (const InstructionBlock* block : code()->instruction_blocks()) {
@@ -143,7 +146,7 @@
 
       masm()->bind(GetLabel(current_block_));
       if (block->must_construct_frame()) {
-        AssemblePrologue();
+        AssembleConstructFrame();
         // We need to setup the root register after we assemble the prologue, to
         // avoid clobbering callee saved registers in case of C linkage and
         // using the roots.
@@ -153,12 +156,14 @@
         }
       }
 
+      CodeGenResult result;
       if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
         ConstantPoolUnavailableScope constant_pool_unavailable(masm());
-        AssembleBlock(block);
+        result = AssembleBlock(block);
       } else {
-        AssembleBlock(block);
+        result = AssembleBlock(block);
       }
+      if (result != kSuccess) return Handle<Code>();
     }
   }
 
@@ -274,8 +279,7 @@
 bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
                                               int* slot_return) {
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
-    if (info()->has_context() && object.is_identical_to(info()->context()) &&
-        !info()->is_osr()) {
+    if (object.is_identical_to(info()->context()) && !info()->is_osr()) {
       *slot_return = Frame::kContextSlot;
       return true;
     } else if (object.is_identical_to(info()->closure())) {
@@ -302,15 +306,18 @@
   return false;
 }
 
-void CodeGenerator::AssembleBlock(const InstructionBlock* block) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
+    const InstructionBlock* block) {
   for (int i = block->code_start(); i < block->code_end(); ++i) {
     Instruction* instr = code()->InstructionAt(i);
-    AssembleInstruction(instr, block);
+    CodeGenResult result = AssembleInstruction(instr, block);
+    if (result != kSuccess) return result;
   }
+  return kSuccess;
 }
 
-void CodeGenerator::AssembleInstruction(Instruction* instr,
-                                        const InstructionBlock* block) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
+    Instruction* instr, const InstructionBlock* block) {
   AssembleGaps(instr);
   DCHECK_IMPLIES(
       block->must_deconstruct_frame(),
@@ -321,7 +328,8 @@
   }
   AssembleSourcePosition(instr);
   // Assemble architecture-specific code for the instruction.
-  AssembleArchInstruction(instr);
+  CodeGenResult result = AssembleArchInstruction(instr);
+  if (result != kSuccess) return result;
 
   FlagsMode mode = FlagsModeField::decode(instr->opcode());
   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
@@ -337,7 +345,7 @@
         if (!IsNextInAssemblyOrder(true_rpo)) {
           AssembleArchJump(true_rpo);
         }
-        return;
+        return kSuccess;
       }
       if (IsNextInAssemblyOrder(true_rpo)) {
         // true block is next, can fall through if condition negated.
@@ -379,6 +387,7 @@
       break;
     }
   }
+  return kSuccess;
 }
 
 
@@ -498,10 +507,6 @@
     handlers_.push_back({caught, GetLabel(handler_rpo), masm()->pc_offset()});
   }
 
-  if (flags & CallDescriptor::kNeedsNopAfterCall) {
-    AddNopForSmiCodeInlining();
-  }
-
   if (needs_frame_state) {
     MarkLazyDeoptSite();
     // If the frame state is present, it starts at argument 1 (just after the
@@ -528,7 +533,7 @@
     // by calls.)
     for (size_t i = 0; i < descriptor->GetSize(); i++) {
       InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
-      CHECK(op->IsStackSlot() || op->IsDoubleStackSlot() || op->IsImmediate());
+      CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
     }
 #endif
     safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
@@ -710,7 +715,7 @@
     } else {
       CHECK(false);
     }
-  } else if (op->IsDoubleStackSlot()) {
+  } else if (op->IsFPStackSlot()) {
     DCHECK(IsFloatingPoint(type.representation()));
     translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
   } else if (op->IsRegister()) {
@@ -728,7 +733,7 @@
     } else {
       CHECK(false);
     }
-  } else if (op->IsDoubleRegister()) {
+  } else if (op->IsFPRegister()) {
     DCHECK(IsFloatingPoint(type.representation()));
     InstructionOperandConverter converter(this, instr);
     translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index b82181c..5f35e8a 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -54,7 +54,7 @@
 
   InstructionSequence* code() const { return code_; }
   FrameAccessState* frame_access_state() const { return frame_access_state_; }
-  Frame* frame() const { return frame_access_state_->frame(); }
+  const Frame* frame() const { return frame_access_state_->frame(); }
   Isolate* isolate() const { return info_->isolate(); }
   Linkage* linkage() const { return linkage_; }
 
@@ -67,6 +67,12 @@
   Zone* zone() const { return code()->zone(); }
   CompilationInfo* info() const { return info_; }
 
+  // Create the FrameAccessState object. The Frame is immutable from here on.
+  void CreateFrameAccessState(Frame* frame);
+
+  // Architecture-specific frame finalization.
+  void FinishFrame(Frame* frame);
+
   // Checks if {block} will appear directly after {current_block_} when
   // assembling code, in which case, a fall-through can be used.
   bool IsNextInAssemblyOrder(RpoNumber block) const;
@@ -84,11 +90,14 @@
   bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                 Heap::RootListIndex* index_return);
 
+  enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };
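+  // kTooManyDeoptimizationBailouts is reported when an architecture-specific
+  // backend cannot reserve a deoptimization table entry for a bailout.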
+
   // Assemble instructions for the specified block.
-  void AssembleBlock(const InstructionBlock* block);
+  CodeGenResult AssembleBlock(const InstructionBlock* block);
 
   // Assemble code for the specified instruction.
-  void AssembleInstruction(Instruction* instr, const InstructionBlock* block);
+  CodeGenResult AssembleInstruction(Instruction* instr,
+                                    const InstructionBlock* block);
   void AssembleSourcePosition(Instruction* instr);
   void AssembleGaps(Instruction* instr);
 
@@ -96,21 +105,19 @@
   // ============= Architecture-specific code generation methods. ==============
   // ===========================================================================
 
-  void AssembleArchInstruction(Instruction* instr);
+  CodeGenResult AssembleArchInstruction(Instruction* instr);
   void AssembleArchJump(RpoNumber target);
   void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
   void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
   void AssembleArchLookupSwitch(Instruction* instr);
   void AssembleArchTableSwitch(Instruction* instr);
 
-  void AssembleDeoptimizerCall(int deoptimization_id,
-                               Deoptimizer::BailoutType bailout_type);
+  CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
+                                        Deoptimizer::BailoutType bailout_type);
 
   // Generates an architecture-specific, descriptor-specific prologue
   // to set up a stack frame.
-  void AssemblePrologue();
-
-  void AssembleSetupStackPointer();
+  void AssembleConstructFrame();
 
   // Generates an architecture-specific, descriptor-specific return sequence
   // to tear down a stack frame.
@@ -174,7 +181,6 @@
                                              Translation* translation);
   void AddTranslationForOperand(Translation* translation, Instruction* instr,
                                 InstructionOperand* op, MachineType type);
-  void AddNopForSmiCodeInlining();
   void EnsureSpaceForLazyDeopt();
   void MarkLazyDeoptSite();
 
diff --git a/src/compiler/code-stub-assembler.cc b/src/compiler/code-stub-assembler.cc
deleted file mode 100644
index bbb4d63..0000000
--- a/src/compiler/code-stub-assembler.cc
+++ /dev/null
@@ -1,1353 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/code-stub-assembler.h"
-
-#include <ostream>
-
-#include "src/code-factory.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
-                                     const CallInterfaceDescriptor& descriptor,
-                                     Code::Flags flags, const char* name,
-                                     size_t result_size)
-    : CodeStubAssembler(
-          isolate, zone,
-          Linkage::GetStubCallDescriptor(
-              isolate, zone, descriptor, descriptor.GetStackParameterCount(),
-              CallDescriptor::kNoFlags, Operator::kNoProperties,
-              MachineType::AnyTagged(), result_size),
-          flags, name) {}
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
-                                     int parameter_count, Code::Flags flags,
-                                     const char* name)
-    : CodeStubAssembler(isolate, zone, Linkage::GetJSCallDescriptor(
-                                           zone, false, parameter_count,
-                                           CallDescriptor::kNoFlags),
-                        flags, name) {}
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
-                                     CallDescriptor* call_descriptor,
-                                     Code::Flags flags, const char* name)
-    : raw_assembler_(new RawMachineAssembler(
-          isolate, new (zone) Graph(zone), call_descriptor,
-          MachineType::PointerRepresentation(),
-          InstructionSelector::SupportedMachineOperatorFlags())),
-      flags_(flags),
-      name_(name),
-      code_generated_(false),
-      variables_(zone) {}
-
-CodeStubAssembler::~CodeStubAssembler() {}
-
-void CodeStubAssembler::CallPrologue() {}
-
-void CodeStubAssembler::CallEpilogue() {}
-
-Handle<Code> CodeStubAssembler::GenerateCode() {
-  DCHECK(!code_generated_);
-
-  Schedule* schedule = raw_assembler_->Export();
-  Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
-      name_);
-
-  code_generated_ = true;
-  return code;
-}
-
-
-Node* CodeStubAssembler::Int32Constant(int value) {
-  return raw_assembler_->Int32Constant(value);
-}
-
-
-Node* CodeStubAssembler::IntPtrConstant(intptr_t value) {
-  return raw_assembler_->IntPtrConstant(value);
-}
-
-
-Node* CodeStubAssembler::NumberConstant(double value) {
-  return raw_assembler_->NumberConstant(value);
-}
-
-Node* CodeStubAssembler::SmiConstant(Smi* value) {
-  return IntPtrConstant(bit_cast<intptr_t>(value));
-}
-
-Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
-  return raw_assembler_->HeapConstant(object);
-}
-
-
-Node* CodeStubAssembler::BooleanConstant(bool value) {
-  return raw_assembler_->BooleanConstant(value);
-}
-
-Node* CodeStubAssembler::ExternalConstant(ExternalReference address) {
-  return raw_assembler_->ExternalConstant(address);
-}
-
-Node* CodeStubAssembler::Float64Constant(double value) {
-  return raw_assembler_->Float64Constant(value);
-}
-
-Node* CodeStubAssembler::BooleanMapConstant() {
-  return HeapConstant(isolate()->factory()->boolean_map());
-}
-
-Node* CodeStubAssembler::HeapNumberMapConstant() {
-  return HeapConstant(isolate()->factory()->heap_number_map());
-}
-
-Node* CodeStubAssembler::NullConstant() {
-  return LoadRoot(Heap::kNullValueRootIndex);
-}
-
-Node* CodeStubAssembler::UndefinedConstant() {
-  return LoadRoot(Heap::kUndefinedValueRootIndex);
-}
-
-Node* CodeStubAssembler::Parameter(int value) {
-  return raw_assembler_->Parameter(value);
-}
-
-void CodeStubAssembler::Return(Node* value) {
-  return raw_assembler_->Return(value);
-}
-
-void CodeStubAssembler::Bind(CodeStubAssembler::Label* label) {
-  return label->Bind();
-}
-
-Node* CodeStubAssembler::LoadFramePointer() {
-  return raw_assembler_->LoadFramePointer();
-}
-
-Node* CodeStubAssembler::LoadParentFramePointer() {
-  return raw_assembler_->LoadParentFramePointer();
-}
-
-Node* CodeStubAssembler::LoadStackPointer() {
-  return raw_assembler_->LoadStackPointer();
-}
-
-Node* CodeStubAssembler::SmiShiftBitsConstant() {
-  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
-Node* CodeStubAssembler::Float64Round(Node* x) {
-  Node* one = Float64Constant(1.0);
-  Node* one_half = Float64Constant(0.5);
-
-  Variable var_x(this, MachineRepresentation::kFloat64);
-  Label return_x(this);
-
-  // Round up {x} towards Infinity.
-  var_x.Bind(Float64Ceil(x));
-
-  GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
-         &return_x);
-  var_x.Bind(Float64Sub(var_x.value(), one));
-  Goto(&return_x);
-
-  Bind(&return_x);
-  return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Ceil(Node* x) {
-  if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
-    return raw_assembler_->Float64RoundUp(x);
-  }
-
-  Node* one = Float64Constant(1.0);
-  Node* zero = Float64Constant(0.0);
-  Node* two_52 = Float64Constant(4503599627370496.0E0);
-  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
-  Variable var_x(this, MachineRepresentation::kFloat64);
-  Label return_x(this), return_minus_x(this);
-  var_x.Bind(x);
-
-  // Check if {x} is greater than zero.
-  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
-  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
-         &if_xnotgreaterthanzero);
-
-  Bind(&if_xgreaterthanzero);
-  {
-    // Just return {x} unless it's in the range ]0,2^52[.
-    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
-    // Round positive {x} towards Infinity.
-    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
-    GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
-    var_x.Bind(Float64Add(var_x.value(), one));
-    Goto(&return_x);
-  }
-
-  Bind(&if_xnotgreaterthanzero);
-  {
-    // Just return {x} unless it's in the range ]-2^52,0[
-    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
-    GotoUnless(Float64LessThan(x, zero), &return_x);
-
-    // Round negated {x} towards Infinity and return the result negated.
-    Node* minus_x = Float64Neg(x);
-    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
-    GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
-    var_x.Bind(Float64Sub(var_x.value(), one));
-    Goto(&return_minus_x);
-  }
-
-  Bind(&return_minus_x);
-  var_x.Bind(Float64Neg(var_x.value()));
-  Goto(&return_x);
-
-  Bind(&return_x);
-  return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Floor(Node* x) {
-  if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
-    return raw_assembler_->Float64RoundDown(x);
-  }
-
-  Node* one = Float64Constant(1.0);
-  Node* zero = Float64Constant(0.0);
-  Node* two_52 = Float64Constant(4503599627370496.0E0);
-  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
-  Variable var_x(this, MachineRepresentation::kFloat64);
-  Label return_x(this), return_minus_x(this);
-  var_x.Bind(x);
-
-  // Check if {x} is greater than zero.
-  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
-  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
-         &if_xnotgreaterthanzero);
-
-  Bind(&if_xgreaterthanzero);
-  {
-    // Just return {x} unless it's in the range ]0,2^52[.
-    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
-    // Round positive {x} towards -Infinity.
-    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
-    GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
-    var_x.Bind(Float64Sub(var_x.value(), one));
-    Goto(&return_x);
-  }
-
-  Bind(&if_xnotgreaterthanzero);
-  {
-    // Just return {x} unless it's in the range ]-2^52,0[
-    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
-    GotoUnless(Float64LessThan(x, zero), &return_x);
-
-    // Round negated {x} towards -Infinity and return the result negated.
-    Node* minus_x = Float64Neg(x);
-    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
-    GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
-    var_x.Bind(Float64Add(var_x.value(), one));
-    Goto(&return_minus_x);
-  }
-
-  Bind(&return_minus_x);
-  var_x.Bind(Float64Neg(var_x.value()));
-  Goto(&return_x);
-
-  Bind(&return_x);
-  return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Trunc(Node* x) {
-  if (raw_assembler_->machine()->Float64RoundTruncate().IsSupported()) {
-    return raw_assembler_->Float64RoundTruncate(x);
-  }
-
-  Node* one = Float64Constant(1.0);
-  Node* zero = Float64Constant(0.0);
-  Node* two_52 = Float64Constant(4503599627370496.0E0);
-  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
-  Variable var_x(this, MachineRepresentation::kFloat64);
-  Label return_x(this), return_minus_x(this);
-  var_x.Bind(x);
-
-  // Check if {x} is greater than 0.
-  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
-  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
-         &if_xnotgreaterthanzero);
-
-  Bind(&if_xgreaterthanzero);
-  {
-    if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
-      var_x.Bind(raw_assembler_->Float64RoundDown(x));
-    } else {
-      // Just return {x} unless it's in the range ]0,2^52[.
-      GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
-      // Round positive {x} towards -Infinity.
-      var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
-      GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
-      var_x.Bind(Float64Sub(var_x.value(), one));
-    }
-    Goto(&return_x);
-  }
-
-  Bind(&if_xnotgreaterthanzero);
-  {
-    if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
-      var_x.Bind(raw_assembler_->Float64RoundUp(x));
-      Goto(&return_x);
-    } else {
-      // Just return {x} unless its in the range ]-2^52,0[.
-      GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
-      GotoUnless(Float64LessThan(x, zero), &return_x);
-
-      // Round negated {x} towards -Infinity and return result negated.
-      Node* minus_x = Float64Neg(x);
-      var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
-      GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
-      var_x.Bind(Float64Sub(var_x.value(), one));
-      Goto(&return_minus_x);
-    }
-  }
-
-  Bind(&return_minus_x);
-  var_x.Bind(Float64Neg(var_x.value()));
-  Goto(&return_x);
-
-  Bind(&return_x);
-  return var_x.value();
-}
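// A minimal standalone sketch of the two_52 trick used by Float64Floor and
// Float64Trunc above, assuming IEEE-754 doubles in round-to-nearest mode and
// no -ffast-math. Adding and then subtracting 2^52 forces any double in
// (0, 2^52) to an integer (doubles >= 2^52 have no fractional bits), and a
// -1 correction turns the FPU's round-to-nearest into floor semantics.
#include <cassert>

static double FloorViaTwo52(double x) {
  const double kTwo52 = 4503599627370496.0;  // 2^52
  if (x <= 0.0 || x >= kTwo52) return x;     // only the positive branch here
  double rounded = (kTwo52 + x) - kTwo52;    // nearest integer to x
  return rounded > x ? rounded - 1.0 : rounded;  // undo upward rounding
}

int main() {
  assert(FloorViaTwo52(2.5) == 2.0);
  assert(FloorViaTwo52(2.0) == 2.0);
  assert(FloorViaTwo52(0.75) == 0.0);
  return 0;
}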
-
-Node* CodeStubAssembler::SmiTag(Node* value) {
-  return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
-}
-
-Node* CodeStubAssembler::SmiUntag(Node* value) {
-  return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-}
-
-Node* CodeStubAssembler::SmiToWord32(Node* value) {
-  Node* result = raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-  if (raw_assembler_->machine()->Is64()) {
-    result = raw_assembler_->TruncateInt64ToInt32(result);
-  }
-  return result;
-}
-
-Node* CodeStubAssembler::SmiToFloat64(Node* value) {
-  return ChangeInt32ToFloat64(SmiUntag(value));
-}
-
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
-
-Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
-  return IntPtrAddWithOverflow(a, b);
-}
-
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
-
-Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
-  return IntPtrSubWithOverflow(a, b);
-}
-
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
-
-Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
-  return IntPtrLessThan(a, b);
-}
-
-Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
-  return IntPtrLessThanOrEqual(a, b);
-}
-
-Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
-  // TODO(bmeurer): Consider using Select once available.
-  Variable min(this, MachineRepresentation::kTagged);
-  Label if_a(this), if_b(this), join(this);
-  BranchIfSmiLessThan(a, b, &if_a, &if_b);
-  Bind(&if_a);
-  min.Bind(a);
-  Goto(&join);
-  Bind(&if_b);
-  min.Bind(b);
-  Goto(&join);
-  Bind(&join);
-  return min.value();
-}
-
-#define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name)   \
-  Node* CodeStubAssembler::name(Node* a, Node* b) { \
-    return raw_assembler_->name(a, b);              \
-  }
-CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
-#undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
-
-Node* CodeStubAssembler::WordShl(Node* value, int shift) {
-  return raw_assembler_->WordShl(value, IntPtrConstant(shift));
-}
-
-#define DEFINE_CODE_STUB_ASSEMBER_UNARY_OP(name) \
-  Node* CodeStubAssembler::name(Node* a) { return raw_assembler_->name(a); }
-CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_UNARY_OP)
-#undef DEFINE_CODE_STUB_ASSEMBER_UNARY_OP
-
-Node* CodeStubAssembler::WordIsSmi(Node* a) {
-  return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
-                   IntPtrConstant(0));
-}
-
-Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
-  return WordEqual(
-      raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
-      IntPtrConstant(0));
-}
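// A minimal sketch of the Smi tagging scheme behind SmiTag, SmiUntag and
// WordIsSmi above, assuming a 32-bit layout where the Smi shift is one bit
// and a Smi's tag bit is 0 (the real constants live in V8's globals; these
// are illustrative).
#include <cassert>
#include <cstdint>

constexpr int kTagSize = 1;
constexpr intptr_t kTagMask = (intptr_t{1} << kTagSize) - 1;

intptr_t SmiTag(intptr_t value) { return value << kTagSize; }
intptr_t SmiUntag(intptr_t word) { return word >> kTagSize; }
bool WordIsSmi(intptr_t word) { return (word & kTagMask) == 0; }

int main() {
  intptr_t tagged = SmiTag(42);
  assert(WordIsSmi(tagged));
  assert(SmiUntag(tagged) == 42);
  // Why SmiAdd/SmiSub are plain IntPtrAdd/IntPtrSub: 2a + 2b == 2(a + b).
  assert(SmiUntag(SmiTag(3) + SmiTag(4)) == 7);
  return 0;
}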
-
-Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
-                                          MachineType rep) {
-  return raw_assembler_->Load(rep, buffer, IntPtrConstant(offset));
-}
-
-Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
-                                         MachineType rep) {
-  return raw_assembler_->Load(rep, object,
-                              IntPtrConstant(offset - kHeapObjectTag));
-}
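// A minimal sketch of why LoadObjectField subtracts kHeapObjectTag: V8 heap
// pointers carry a low tag bit of 1, so the field at logical offset N lives
// at tagged_ptr + N - 1. The two-slot layout below is made up for
// illustration.
#include <cassert>
#include <cstdint>
#include <cstring>

constexpr uintptr_t kHeapObjectTag = 1;

int32_t LoadField(uintptr_t tagged_ptr, int offset) {
  int32_t result;
  std::memcpy(
      &result,
      reinterpret_cast<const void*>(tagged_ptr + offset - kHeapObjectTag),
      sizeof(result));
  return result;
}

int main() {
  int32_t fields[2] = {7, 11};  // stand-in for an object's field slots
  uintptr_t tagged = reinterpret_cast<uintptr_t>(fields) + kHeapObjectTag;
  assert(LoadField(tagged, 0) == 7);
  assert(LoadField(tagged, sizeof(int32_t)) == 11);
  return 0;
}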
-
-Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
-  return Load(MachineType::Float64(), object,
-              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
-  return StoreNoWriteBarrier(
-      MachineRepresentation::kFloat64, object,
-      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
-}
-
-Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
-  Node* value = LoadHeapNumberValue(object);
-  return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript,
-                                                value);
-}
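// TruncationMode::kJavaScript above requests ECMAScript ToInt32 semantics
// rather than C-style conversion: NaN and infinities become 0 and everything
// else wraps modulo 2^32. A scalar sketch of that behaviour:
#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

int32_t JsToInt32(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;
  double wrapped = std::fmod(std::trunc(value), 4294967296.0);  // mod 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}

int main() {
  assert(JsToInt32(-1.5) == -1);
  assert(JsToInt32(4294967296.0 + 3.0) == 3);
  assert(JsToInt32(std::numeric_limits<double>::infinity()) == 0);
  return 0;
}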
-
-Node* CodeStubAssembler::LoadMapBitField(Node* map) {
-  return Load(MachineType::Uint8(), map,
-              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
-  return Load(MachineType::Uint8(), map,
-              IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
-  return Load(MachineType::Uint32(), map,
-              IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
-  return Load(MachineType::Uint8(), map,
-              IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
-  return LoadObjectField(map, Map::kDescriptorsOffset);
-}
-
-Node* CodeStubAssembler::LoadNameHash(Node* name) {
-  return Load(MachineType::Uint32(), name,
-              IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadFixedArrayElementInt32Index(
-    Node* object, Node* int32_index, int additional_offset) {
-  Node* header_size = IntPtrConstant(additional_offset +
-                                     FixedArray::kHeaderSize - kHeapObjectTag);
-  Node* scaled_index = WordShl(int32_index, IntPtrConstant(kPointerSizeLog2));
-  Node* offset = IntPtrAdd(scaled_index, header_size);
-  return Load(MachineType::AnyTagged(), object, offset);
-}
-
-Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
-                                                       Node* smi_index,
-                                                       int additional_offset) {
-  int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
-  Node* header_size = IntPtrConstant(additional_offset +
-                                     FixedArray::kHeaderSize - kHeapObjectTag);
-  Node* scaled_index =
-      (kSmiShiftBits > kPointerSizeLog2)
-          ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
-          : WordShl(smi_index,
-                    IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
-  Node* offset = IntPtrAdd(scaled_index, header_size);
-  return Load(MachineType::AnyTagged(), object, offset);
-}
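// A sketch of the index arithmetic in LoadFixedArrayElementSmiIndex: a Smi
// stores the index pre-shifted left by kSmiShiftBits, and the byte offset is
// the index shifted left by kPointerSizeLog2, so one shift by the difference
// converts directly between the two encodings. The constants below assume a
// 64-bit layout (illustrative only); a 32-bit layout would shift left rather
// than right.
#include <cassert>
#include <cstdint>

constexpr int kSmiShiftBits = 32;    // Smi payload starts at bit 32
constexpr int kPointerSizeLog2 = 3;  // kPointerSize == 8

intptr_t SmiIndexToByteOffset(intptr_t smi_index) {
  // kSmiShiftBits > kPointerSizeLog2 on this layout, so shift right.
  return smi_index >> (kSmiShiftBits - kPointerSizeLog2);
}

int main() {
  intptr_t smi_five = intptr_t{5} << kSmiShiftBits;  // Smi-encoded index 5
  assert(SmiIndexToByteOffset(smi_five) == 5 * 8);   // byte offset 40
  return 0;
}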
-
-Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
-                                                            int index) {
-  Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
-                                index * kPointerSize);
-  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
-}
-
-Node* CodeStubAssembler::StoreFixedArrayElementNoWriteBarrier(Node* object,
-                                                              Node* index,
-                                                              Node* value) {
-  Node* offset =
-      IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
-                IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
-  return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
-                             value);
-}
-
-Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
-  if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
-    Handle<Object> root = isolate()->heap()->root_handle(root_index);
-    if (root->IsSmi()) {
-      return SmiConstant(Smi::cast(*root));
-    } else {
-      return HeapConstant(Handle<HeapObject>::cast(root));
-    }
-  }
-
-  compiler::Node* roots_array_start =
-      ExternalConstant(ExternalReference::roots_array_start(isolate()));
-  USE(roots_array_start);
-
-  // TODO(danno): Implement the root-access case where the root is not constant
-  // and must be loaded from the root array.
-  UNIMPLEMENTED();
-  return nullptr;
-}
-
-Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
-                                              AllocationFlags flags,
-                                              Node* top_address,
-                                              Node* limit_address) {
-  Node* top = Load(MachineType::Pointer(), top_address);
-  Node* limit = Load(MachineType::Pointer(), limit_address);
-
-  // If there's not enough space, call the runtime.
-  RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
-      merge_runtime;
-  raw_assembler_->Branch(
-      raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
-      &runtime_call, &no_runtime_call);
-
-  raw_assembler_->Bind(&runtime_call);
-  // AllocateInTargetSpace does not use the context.
-  Node* context = IntPtrConstant(0);
-  Node* runtime_flags = SmiTag(Int32Constant(
-      AllocateDoubleAlignFlag::encode(false) |
-      AllocateTargetSpace::encode(flags & kPretenured
-                                      ? AllocationSpace::OLD_SPACE
-                                      : AllocationSpace::NEW_SPACE)));
-  Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
-                                     SmiTag(size_in_bytes), runtime_flags);
-  raw_assembler_->Goto(&merge_runtime);
-
-  // When there is enough space, return `top' and bump it up.
-  raw_assembler_->Bind(&no_runtime_call);
-  Node* no_runtime_result = top;
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
-                      IntPtrAdd(top, size_in_bytes));
-  no_runtime_result =
-      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
-  raw_assembler_->Goto(&merge_runtime);
-
-  raw_assembler_->Bind(&merge_runtime);
-  return raw_assembler_->Phi(MachineType::PointerRepresentation(),
-                             runtime_result, no_runtime_result);
-}
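// A minimal sketch of the bump-pointer fast path implemented above: allocate
// by advancing `top', and defer to a slow path only when the request would
// cross `limit' (where the real code calls Runtime::kAllocateInTargetSpace).
#include <cassert>
#include <cstdint>

struct Space {
  uintptr_t top;
  uintptr_t limit;
};

// Returns the allocated address, or 0 to signal "take the runtime call".
uintptr_t AllocateRaw(Space* space, uintptr_t size_in_bytes) {
  if (space->limit - space->top < size_in_bytes) return 0;
  uintptr_t result = space->top;
  space->top += size_in_bytes;  // bump the allocation top
  return result;  // the stub additionally adds kHeapObjectTag to the result
}

int main() {
  alignas(8) static uint8_t arena[64];
  Space space{reinterpret_cast<uintptr_t>(arena),
              reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
  uintptr_t a = AllocateRaw(&space, 16);
  uintptr_t b = AllocateRaw(&space, 16);
  assert(a != 0 && b == a + 16);
  assert(AllocateRaw(&space, 64) == 0);  // exhausted: runtime path
  return 0;
}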
-
-Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
-                                            AllocationFlags flags,
-                                            Node* top_address,
-                                            Node* limit_address) {
-  Node* top = Load(MachineType::Pointer(), top_address);
-  Node* limit = Load(MachineType::Pointer(), limit_address);
-  Node* adjusted_size = size_in_bytes;
-  if (flags & kDoubleAlignment) {
-    // TODO(epertoso): Simd128 alignment.
-    RawMachineLabel aligned, not_aligned, merge;
-    raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
-                           &not_aligned, &aligned);
-
-    raw_assembler_->Bind(&not_aligned);
-    Node* not_aligned_size =
-        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
-    raw_assembler_->Goto(&merge);
-
-    raw_assembler_->Bind(&aligned);
-    raw_assembler_->Goto(&merge);
-
-    raw_assembler_->Bind(&merge);
-    adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
-                                        not_aligned_size, adjusted_size);
-  }
-
-  Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
-
-  RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
-  raw_assembler_->Branch(
-      raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
-      &doesnt_need_filler, &needs_filler);
-
-  raw_assembler_->Bind(&needs_filler);
-  // Store a filler and increase the address by kPointerSize.
-  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
-  // it when Simd128 alignment is supported.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
-                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
-  Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
-  raw_assembler_->Goto(&merge_address);
-
-  raw_assembler_->Bind(&doesnt_need_filler);
-  Node* address_without_filler = address;
-  raw_assembler_->Goto(&merge_address);
-
-  raw_assembler_->Bind(&merge_address);
-  address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
-                                address_with_filler, address_without_filler);
-  // Update the top.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
-                      IntPtrAdd(top, adjusted_size));
-  return address;
-}
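// A sketch of the alignment adjustment above: if `top' is not double-aligned,
// one extra pointer-sized slot is requested and a one-pointer filler object
// is written there, so the allocation proper starts aligned. The constants
// assume a 32-bit build (kPointerSize == 4, kDoubleAlignmentMask == 7) and
// are illustrative only.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kDoubleAlignmentMask = 7;
constexpr uintptr_t kPointerSize = 4;

uintptr_t AlignedObjectStart(uintptr_t top) {
  // A misaligned top skips one slot; the skipped slot holds the filler map.
  return (top & kDoubleAlignmentMask) != 0 ? top + kPointerSize : top;
}

int main() {
  assert(AlignedObjectStart(0x1000) == 0x1000);  // already aligned
  assert(AlignedObjectStart(0x1004) == 0x1008);  // filler written at 0x1004
  return 0;
}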
-
-Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
-  bool const new_space = !(flags & kPretenured);
-  Node* top_address = ExternalConstant(
-      new_space
-          ? ExternalReference::new_space_allocation_top_address(isolate())
-          : ExternalReference::old_space_allocation_top_address(isolate()));
-  Node* limit_address = ExternalConstant(
-      new_space
-          ? ExternalReference::new_space_allocation_limit_address(isolate())
-          : ExternalReference::old_space_allocation_limit_address(isolate()));
-
-#ifdef V8_HOST_ARCH_32_BIT
-  if (flags & kDoubleAlignment) {
-    return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
-                              limit_address);
-  }
-#endif
-
-  return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
-                              limit_address);
-}
-
-Node* CodeStubAssembler::AllocateHeapNumber() {
-  Node* result = Allocate(HeapNumber::kSize, kNone);
-  StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
-  return result;
-}
-
-Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
-  Node* result = AllocateHeapNumber();
-  StoreHeapNumberValue(result, value);
-  return result;
-}
-
-Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
-  return raw_assembler_->Load(rep, base);
-}
-
-Node* CodeStubAssembler::Load(MachineType rep, Node* base, Node* index) {
-  return raw_assembler_->Load(rep, base, index);
-}
-
-Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
-                               Node* value) {
-  return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
-}
-
-Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
-                               Node* index, Node* value) {
-  return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
-}
-
-Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
-                                             Node* base, Node* value) {
-  return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
-}
-
-Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
-                                             Node* base, Node* index,
-                                             Node* value) {
-  return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
-}
-
-Node* CodeStubAssembler::Projection(int index, Node* value) {
-  return raw_assembler_->Projection(index, value);
-}
-
-Node* CodeStubAssembler::LoadMap(Node* object) {
-  return LoadObjectField(object, HeapObject::kMapOffset);
-}
-
-Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
-  return StoreNoWriteBarrier(
-      MachineRepresentation::kTagged, object,
-      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
-}
-
-Node* CodeStubAssembler::LoadInstanceType(Node* object) {
-  return LoadMapInstanceType(LoadMap(object));
-}
-
-Node* CodeStubAssembler::LoadElements(Node* object) {
-  return LoadObjectField(object, JSObject::kElementsOffset);
-}
-
-Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
-  return LoadObjectField(array, FixedArrayBase::kLengthOffset);
-}
-
-Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
-                                        uint32_t mask) {
-  return raw_assembler_->Word32Shr(
-      raw_assembler_->Word32And(word32, raw_assembler_->Int32Constant(mask)),
-      raw_assembler_->Int32Constant(shift));
-}
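// BitFieldDecode above is the usual mask-then-shift extraction; a standalone
// sketch with an illustrative 3-bit field at bit 4:
#include <cassert>
#include <cstdint>

uint32_t BitFieldDecode(uint32_t word32, uint32_t shift, uint32_t mask) {
  return (word32 & mask) >> shift;
}

int main() {
  constexpr uint32_t kShift = 4;
  constexpr uint32_t kMask = 0x7u << kShift;
  uint32_t word = (5u << kShift) | 0xFu;  // field holds 5, low bits are noise
  assert(BitFieldDecode(word, kShift, kMask) == 5u);
  return 0;
}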
-
-Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
-  Node* value32 = raw_assembler_->TruncateFloat64ToInt32(
-      TruncationMode::kRoundToZero, value);
-  Node* value64 = ChangeInt32ToFloat64(value32);
-
-  Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
-
-  Label if_valueisequal(this), if_valueisnotequal(this);
-  Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
-  Bind(&if_valueisequal);
-  {
-    Label if_valueiszero(this), if_valueisnotzero(this);
-    Branch(Float64Equal(value, Float64Constant(0.0)), &if_valueiszero,
-           &if_valueisnotzero);
-
-    Bind(&if_valueiszero);
-    BranchIfInt32LessThan(raw_assembler_->Float64ExtractHighWord32(value),
-                          Int32Constant(0), &if_valueisheapnumber,
-                          &if_valueisint32);
-
-    Bind(&if_valueisnotzero);
-    Goto(&if_valueisint32);
-  }
-  Bind(&if_valueisnotequal);
-  Goto(&if_valueisheapnumber);
-
-  Variable var_result(this, MachineRepresentation::kTagged);
-  Bind(&if_valueisint32);
-  {
-    if (raw_assembler_->machine()->Is64()) {
-      Node* result = SmiTag(ChangeInt32ToInt64(value32));
-      var_result.Bind(result);
-      Goto(&if_join);
-    } else {
-      Node* pair = Int32AddWithOverflow(value32, value32);
-      Node* overflow = Projection(1, pair);
-      Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
-      Branch(overflow, &if_overflow, &if_notoverflow);
-      Bind(&if_overflow);
-      Goto(&if_valueisheapnumber);
-      Bind(&if_notoverflow);
-      {
-        Node* result = Projection(0, pair);
-        var_result.Bind(result);
-        Goto(&if_join);
-      }
-    }
-  }
-  Bind(&if_valueisheapnumber);
-  {
-    Node* result = AllocateHeapNumberWithValue(value);
-    var_result.Bind(result);
-    Goto(&if_join);
-  }
-  Bind(&if_join);
-  return var_result.value();
-}
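// The zero check above guards minus zero: +0.0 == -0.0 under Float64Equal,
// but -0.0 has no Smi representation, so the sign is read from the double's
// high word (Float64ExtractHighWord32 < 0 iff the sign bit is set). A sketch:
#include <cassert>
#include <cstdint>
#include <cstring>

bool IsMinusZero(double value) {
  if (value != 0.0) return false;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<int32_t>(bits >> 32) < 0;  // sign bit of the high word
}

int main() {
  assert(!IsMinusZero(0.0));
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(-1.0));
  return 0;
}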
-
-Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
-  if (raw_assembler_->machine()->Is64()) {
-    return SmiTag(ChangeInt32ToInt64(value));
-  }
-  Variable var_result(this, MachineRepresentation::kTagged);
-  Node* pair = Int32AddWithOverflow(value, value);
-  Node* overflow = Projection(1, pair);
-  Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
-      if_join(this);
-  Branch(overflow, &if_overflow, &if_notoverflow);
-  Bind(&if_overflow);
-  {
-    Node* value64 = ChangeInt32ToFloat64(value);
-    Node* result = AllocateHeapNumberWithValue(value64);
-    var_result.Bind(result);
-  }
-  Goto(&if_join);
-  Bind(&if_notoverflow);
-  {
-    Node* result = Projection(0, pair);
-    var_result.Bind(result);
-  }
-  Goto(&if_join);
-  Bind(&if_join);
-  return var_result.value();
-}
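// On 32-bit targets a Smi is the value shifted left by one, i.e. value +
// value, so the overflow-checked self-add above is simultaneously the
// tagging step and the "fits in 31 bits" test. A sketch using the GCC/Clang
// overflow builtin:
#include <cassert>
#include <cstdint>

// True on success; false means the value needs a HeapNumber instead.
bool TryTagInt32(int32_t value, int32_t* tagged) {
  return !__builtin_add_overflow(value, value, tagged);
}

int main() {
  int32_t tagged;
  assert(TryTagInt32(42, &tagged) && tagged == 84);
  assert(!TryTagInt32(0x40000000, &tagged));  // 2^30: too wide for a Smi
  return 0;
}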
-
-Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
-  // We might need to loop once due to ToNumber conversion.
-  Variable var_value(this, MachineRepresentation::kTagged),
-      var_result(this, MachineRepresentation::kFloat64);
-  Label loop(this, &var_value), done_loop(this, &var_result);
-  var_value.Bind(value);
-  Goto(&loop);
-  Bind(&loop);
-  {
-    // Load the current {value}.
-    value = var_value.value();
-
-    // Check if the {value} is a Smi or a HeapObject.
-    Label if_valueissmi(this), if_valueisnotsmi(this);
-    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
-    Bind(&if_valueissmi);
-    {
-      // Convert the Smi {value}.
-      var_result.Bind(SmiToFloat64(value));
-      Goto(&done_loop);
-    }
-
-    Bind(&if_valueisnotsmi);
-    {
-      // Check if {value} is a HeapNumber.
-      Label if_valueisheapnumber(this),
-          if_valueisnotheapnumber(this, Label::kDeferred);
-      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
-             &if_valueisheapnumber, &if_valueisnotheapnumber);
-
-      Bind(&if_valueisheapnumber);
-      {
-        // Load the floating point value.
-        var_result.Bind(LoadHeapNumberValue(value));
-        Goto(&done_loop);
-      }
-
-      Bind(&if_valueisnotheapnumber);
-      {
-        // Convert the {value} to a Number first.
-        Callable callable = CodeFactory::NonNumberToNumber(isolate());
-        var_value.Bind(CallStub(callable, context, value));
-        Goto(&loop);
-      }
-    }
-  }
-  Bind(&done_loop);
-  return var_result.value();
-}
-
-Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
-  // We might need to loop once due to ToNumber conversion.
-  Variable var_value(this, MachineRepresentation::kTagged),
-      var_result(this, MachineRepresentation::kWord32);
-  Label loop(this, &var_value), done_loop(this, &var_result);
-  var_value.Bind(value);
-  Goto(&loop);
-  Bind(&loop);
-  {
-    // Load the current {value}.
-    value = var_value.value();
-
-    // Check if the {value} is a Smi or a HeapObject.
-    Label if_valueissmi(this), if_valueisnotsmi(this);
-    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
-    Bind(&if_valueissmi);
-    {
-      // Convert the Smi {value}.
-      var_result.Bind(SmiToWord32(value));
-      Goto(&done_loop);
-    }
-
-    Bind(&if_valueisnotsmi);
-    {
-      // Check if {value} is a HeapNumber.
-      Label if_valueisheapnumber(this),
-          if_valueisnotheapnumber(this, Label::kDeferred);
-      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
-             &if_valueisheapnumber, &if_valueisnotheapnumber);
-
-      Bind(&if_valueisheapnumber);
-      {
-        // Truncate the floating point value.
-        var_result.Bind(TruncateHeapNumberValueToWord32(value));
-        Goto(&done_loop);
-      }
-
-      Bind(&if_valueisnotheapnumber);
-      {
-        // Convert the {value} to a Number first.
-        Callable callable = CodeFactory::NonNumberToNumber(isolate());
-        var_value.Bind(CallStub(callable, context, value));
-        Goto(&loop);
-      }
-    }
-  }
-  Bind(&done_loop);
-  return var_result.value();
-}
-
-void CodeStubAssembler::BranchIf(Node* condition, Label* if_true,
-                                 Label* if_false) {
-  Label if_condition_is_true(this), if_condition_is_false(this);
-  Branch(condition, &if_condition_is_true, &if_condition_is_false);
-  Bind(&if_condition_is_true);
-  Goto(if_true);
-  Bind(&if_condition_is_false);
-  Goto(if_false);
-}
-
-Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
-                               Node** args) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
-  CallEpilogue();
-  return return_value;
-}
-
-
-Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
-                                   Node* code_target, Node** args) {
-  return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                     Node* context) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2) {
-  CallPrologue();
-  Node* return_value =
-      raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3) {
-  CallPrologue();
-  Node* return_value =
-      raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3, Node* arg4) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
-                                                    arg3, arg4, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                         Node* context) {
-  return raw_assembler_->TailCallRuntime0(function_id, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                         Node* context, Node* arg1) {
-  return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                         Node* context, Node* arg1,
-                                         Node* arg2) {
-  return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                         Node* context, Node* arg1, Node* arg2,
-                                         Node* arg3) {
-  return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
-                                          context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                         Node* context, Node* arg1, Node* arg2,
-                                         Node* arg3, Node* arg4) {
-  return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
-                                          context);
-}
-
-Node* CodeStubAssembler::CallStub(Callable const& callable, Node* context,
-                                  Node* arg1, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, result_size);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(2);
-  args[0] = arg1;
-  args[1] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(3);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  Node* arg5, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(6);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::TailCallStub(Callable const& callable, Node* context,
-                                      Node* arg1, Node* arg2,
-                                      size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
-                      result_size);
-}
-
-Node* CodeStubAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                      Node* target, Node* context, Node* arg1,
-                                      Node* arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(3);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::TailCall(
-    const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
-    Node** args, size_t result_size) {
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), interface_descriptor,
-      interface_descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-  return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
-
-void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
-  label->MergeVariables();
-  raw_assembler_->Goto(label->label_);
-}
-
-void CodeStubAssembler::GotoIf(Node* condition, Label* true_label) {
-  Label false_label(this);
-  Branch(condition, true_label, &false_label);
-  Bind(&false_label);
-}
-
-void CodeStubAssembler::GotoUnless(Node* condition, Label* false_label) {
-  Label true_label(this);
-  Branch(condition, &true_label, false_label);
-  Bind(&true_label);
-}
-
-void CodeStubAssembler::Branch(Node* condition,
-                               CodeStubAssembler::Label* true_label,
-                               CodeStubAssembler::Label* false_label) {
-  true_label->MergeVariables();
-  false_label->MergeVariables();
-  return raw_assembler_->Branch(condition, true_label->label_,
-                                false_label->label_);
-}
-
-void CodeStubAssembler::Switch(Node* index, Label* default_label,
-                               int32_t* case_values, Label** case_labels,
-                               size_t case_count) {
-  RawMachineLabel** labels =
-      new (zone()->New(sizeof(RawMachineLabel*) * case_count))
-          RawMachineLabel*[case_count];
-  for (size_t i = 0; i < case_count; ++i) {
-    labels[i] = case_labels[i]->label_;
-    case_labels[i]->MergeVariables();
-    default_label->MergeVariables();
-  }
-  return raw_assembler_->Switch(index, default_label->label_, case_values,
-                                labels, case_count);
-}
-
-// RawMachineAssembler delegate helpers:
-Isolate* CodeStubAssembler::isolate() const {
-  return raw_assembler_->isolate();
-}
-
-Factory* CodeStubAssembler::factory() const { return isolate()->factory(); }
-
-Graph* CodeStubAssembler::graph() const { return raw_assembler_->graph(); }
-
-Zone* CodeStubAssembler::zone() const { return raw_assembler_->zone(); }
-
-// The core implementation of Variable is stored through an indirection so
-// that it can outlive the often block-scoped Variable declarations. This is
-// needed to ensure that variable binding and merging through phis can
-// properly be verified.
-class CodeStubAssembler::Variable::Impl : public ZoneObject {
- public:
-  explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
-  Node* value_;
-  MachineRepresentation rep_;
-};
-
-CodeStubAssembler::Variable::Variable(CodeStubAssembler* assembler,
-                                      MachineRepresentation rep)
-    : impl_(new (assembler->zone()) Impl(rep)) {
-  assembler->variables_.push_back(impl_);
-}
-
-void CodeStubAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
-
-Node* CodeStubAssembler::Variable::value() const {
-  DCHECK_NOT_NULL(impl_->value_);
-  return impl_->value_;
-}
-
-MachineRepresentation CodeStubAssembler::Variable::rep() const {
-  return impl_->rep_;
-}
-
-bool CodeStubAssembler::Variable::IsBound() const {
-  return impl_->value_ != nullptr;
-}
-
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
-                                int merged_value_count,
-                                CodeStubAssembler::Variable** merged_variables,
-                                CodeStubAssembler::Label::Type type)
-    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
-  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
-  label_ = new (buffer)
-      RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
-                                        : RawMachineLabel::kNonDeferred);
-  for (int i = 0; i < merged_value_count; ++i) {
-    variable_phis_[merged_variables[i]->impl_] = nullptr;
-  }
-}
-
-void CodeStubAssembler::Label::MergeVariables() {
-  ++merge_count_;
-  for (auto var : assembler_->variables_) {
-    size_t count = 0;
-    Node* node = var->value_;
-    if (node != nullptr) {
-      auto i = variable_merges_.find(var);
-      if (i != variable_merges_.end()) {
-        i->second.push_back(node);
-        count = i->second.size();
-      } else {
-        count = 1;
-        variable_merges_[var] = std::vector<Node*>(1, node);
-      }
-    }
-    // If the following assert fires, then a variable that this label expects
-    // to merge into a phi was not bound along the path that just jumped here.
-    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
-           count == merge_count_);
-    USE(count);
-
-    // If the label is already bound, we already know the set of variables to
-    // merge and phi nodes have already been created.
-    if (bound_) {
-      auto phi = variable_phis_.find(var);
-      if (phi != variable_phis_.end()) {
-        DCHECK_NOT_NULL(phi->second);
-        assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
-      } else {
-        auto i = variable_merges_.find(var);
-        if (i != variable_merges_.end()) {
-          // If the following assert fires, then you've declared a variable that
-          // has the same bound value along all paths up until the point you
-          // bound this label, but then later merged a path with a new value for
-          // the variable after the label bind (it's not possible to add phis to
-          // the bound label after the fact; just make sure to list the variable
-          // in the label's constructor's list of merged variables).
-          DCHECK(find_if(i->second.begin(), i->second.end(),
-                         [node](Node* e) -> bool { return node != e; }) ==
-                 i->second.end());
-        }
-      }
-    }
-  }
-}
-
-void CodeStubAssembler::Label::Bind() {
-  DCHECK(!bound_);
-  assembler_->raw_assembler_->Bind(label_);
-
-  // Make sure that all variables that have changed along any path up to this
-  // point are marked as merge variables.
-  for (auto var : assembler_->variables_) {
-    Node* shared_value = nullptr;
-    auto i = variable_merges_.find(var);
-    if (i != variable_merges_.end()) {
-      for (auto value : i->second) {
-        DCHECK(value != nullptr);
-        if (value != shared_value) {
-          if (shared_value == nullptr) {
-            shared_value = value;
-          } else {
-            variable_phis_[var] = nullptr;
-          }
-        }
-      }
-    }
-  }
-
-  for (auto var : variable_phis_) {
-    CodeStubAssembler::Variable::Impl* var_impl = var.first;
-    auto i = variable_merges_.find(var_impl);
-    // If the following assert fires, then a variable that has been marked as
-    // being merged at the label--either by explicitly marking it so in the
-    // label constructor or by having seen different bound values at branches
-    // into the label--doesn't have a bound value along all of the paths that
-    // have been merged into the label up to this point.
-    DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
-    Node* phi = assembler_->raw_assembler_->Phi(
-        var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
-    variable_phis_[var_impl] = phi;
-  }
-
-  // Bind all variables to a merge phi, the common value along all paths or
-  // null.
-  for (auto var : assembler_->variables_) {
-    auto i = variable_phis_.find(var);
-    if (i != variable_phis_.end()) {
-      var->value_ = i->second;
-    } else {
-      auto j = variable_merges_.find(var);
-      if (j != variable_merges_.end() && j->second.size() == merge_count_) {
-        var->value_ = j->second.back();
-      } else {
-        var->value_ = nullptr;
-      }
-    }
-  }
-
-  bound_ = true;
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/code-stub-assembler.h b/src/compiler/code-stub-assembler.h
deleted file mode 100644
index 9fcb890..0000000
--- a/src/compiler/code-stub-assembler.h
+++ /dev/null
@@ -1,475 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
-#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
-
-#include <map>
-
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
-#include "src/allocation.h"
-#include "src/builtins.h"
-#include "src/heap/heap.h"
-#include "src/machine-type.h"
-#include "src/runtime/runtime.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class Callable;
-class CallInterfaceDescriptor;
-class Isolate;
-class Factory;
-class Zone;
-
-namespace compiler {
-
-class CallDescriptor;
-class Graph;
-class Node;
-class Operator;
-class RawMachineAssembler;
-class RawMachineLabel;
-class Schedule;
-
-#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
-  V(Float32Equal)                                     \
-  V(Float32LessThan)                                  \
-  V(Float32LessThanOrEqual)                           \
-  V(Float32GreaterThan)                               \
-  V(Float32GreaterThanOrEqual)                        \
-  V(Float64Equal)                                     \
-  V(Float64LessThan)                                  \
-  V(Float64LessThanOrEqual)                           \
-  V(Float64GreaterThan)                               \
-  V(Float64GreaterThanOrEqual)                        \
-  V(Int32GreaterThan)                                 \
-  V(Int32GreaterThanOrEqual)                          \
-  V(Int32LessThan)                                    \
-  V(Int32LessThanOrEqual)                             \
-  V(IntPtrLessThan)                                   \
-  V(IntPtrLessThanOrEqual)                            \
-  V(Uint32LessThan)                                   \
-  V(UintPtrGreaterThanOrEqual)                        \
-  V(WordEqual)                                        \
-  V(WordNotEqual)                                     \
-  V(Word32Equal)                                      \
-  V(Word32NotEqual)                                   \
-  V(Word64Equal)                                      \
-  V(Word64NotEqual)
-
-#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V)   \
-  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
-  V(Float64Add)                                 \
-  V(Float64Sub)                                 \
-  V(Float64InsertLowWord32)                     \
-  V(Float64InsertHighWord32)                    \
-  V(IntPtrAdd)                                  \
-  V(IntPtrAddWithOverflow)                      \
-  V(IntPtrSub)                                  \
-  V(IntPtrSubWithOverflow)                      \
-  V(Int32Add)                                   \
-  V(Int32AddWithOverflow)                       \
-  V(Int32Sub)                                   \
-  V(Int32Mul)                                   \
-  V(WordOr)                                     \
-  V(WordAnd)                                    \
-  V(WordXor)                                    \
-  V(WordShl)                                    \
-  V(WordShr)                                    \
-  V(WordSar)                                    \
-  V(WordRor)                                    \
-  V(Word32Or)                                   \
-  V(Word32And)                                  \
-  V(Word32Xor)                                  \
-  V(Word32Shl)                                  \
-  V(Word32Shr)                                  \
-  V(Word32Sar)                                  \
-  V(Word32Ror)                                  \
-  V(Word64Or)                                   \
-  V(Word64And)                                  \
-  V(Word64Xor)                                  \
-  V(Word64Shr)                                  \
-  V(Word64Sar)                                  \
-  V(Word64Ror)
-
-#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
-  V(Float64Neg)                              \
-  V(Float64Sqrt)                             \
-  V(ChangeFloat64ToUint32)                   \
-  V(ChangeInt32ToFloat64)                    \
-  V(ChangeInt32ToInt64)                      \
-  V(ChangeUint32ToFloat64)                   \
-  V(ChangeUint32ToUint64)                    \
-  V(Word32Clz)
-
-class CodeStubAssembler {
- public:
-  // Create with CallStub linkage.
-  // |result_size| specifies the number of results returned by the stub.
-  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
-  CodeStubAssembler(Isolate* isolate, Zone* zone,
-                    const CallInterfaceDescriptor& descriptor,
-                    Code::Flags flags, const char* name,
-                    size_t result_size = 1);
-
-  // Create with JSCall linkage.
-  CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
-                    Code::Flags flags, const char* name);
-
-  virtual ~CodeStubAssembler();
-
-  Handle<Code> GenerateCode();
-
-  class Label;
-  class Variable {
-   public:
-    explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep);
-    void Bind(Node* value);
-    Node* value() const;
-    MachineRepresentation rep() const;
-    bool IsBound() const;
-
-   private:
-    friend class CodeStubAssembler;
-    class Impl;
-    Impl* impl_;
-  };
-
-  enum AllocationFlag : uint8_t {
-    kNone = 0,
-    kDoubleAlignment = 1,
-    kPretenured = 1 << 1
-  };
-
-  typedef base::Flags<AllocationFlag> AllocationFlags;
-
-  // ===========================================================================
-  // Base Assembler
-  // ===========================================================================
-
-  // Constants.
-  Node* Int32Constant(int value);
-  Node* IntPtrConstant(intptr_t value);
-  Node* NumberConstant(double value);
-  Node* SmiConstant(Smi* value);
-  Node* HeapConstant(Handle<HeapObject> object);
-  Node* BooleanConstant(bool value);
-  Node* ExternalConstant(ExternalReference address);
-  Node* Float64Constant(double value);
-  Node* BooleanMapConstant();
-  Node* HeapNumberMapConstant();
-  Node* NullConstant();
-  Node* UndefinedConstant();
-
-  Node* Parameter(int value);
-  void Return(Node* value);
-
-  void Bind(Label* label);
-  void Goto(Label* label);
-  void GotoIf(Node* condition, Label* true_label);
-  void GotoUnless(Node* condition, Label* false_label);
-  void Branch(Node* condition, Label* true_label, Label* false_label);
-
-  void Switch(Node* index, Label* default_label, int32_t* case_values,
-              Label** case_labels, size_t case_count);
-
-  // Access to the frame pointer
-  Node* LoadFramePointer();
-  Node* LoadParentFramePointer();
-
-  // Access to the stack pointer
-  Node* LoadStackPointer();
-
-  // Load raw memory location.
-  Node* Load(MachineType rep, Node* base);
-  Node* Load(MachineType rep, Node* base, Node* index);
-
-  // Store value to raw memory location.
-  Node* Store(MachineRepresentation rep, Node* base, Node* value);
-  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
-  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
-  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
-                            Node* value);
-
-// Basic arithmetic operations.
-#define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b);
-  CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP)
-#undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP
-
-  Node* WordShl(Node* value, int shift);
-
-// Unary
-#define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a);
-  CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP)
-#undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP
-
-  // Projections
-  Node* Projection(int index, Node* value);
-
-  // Calls
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3, Node* arg4);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3, Node* arg4);
-
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 size_t result_size = 1);
-
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                 Node* arg5, size_t result_size = 1);
-
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, size_t result_size = 1);
-
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2,
-                     size_t result_size = 1);
-
-  Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node** args, size_t result_size = 1);
-
-  // ===========================================================================
-  // Macros
-  // ===========================================================================
-
-  // Float64 operations.
-  Node* Float64Ceil(Node* x);
-  Node* Float64Floor(Node* x);
-  Node* Float64Round(Node* x);
-  Node* Float64Trunc(Node* x);
-
-  // Tag a Word as a Smi value.
-  Node* SmiTag(Node* value);
-  // Untag a Smi value as a Word.
-  Node* SmiUntag(Node* value);
-
-  // Smi conversions.
-  Node* SmiToFloat64(Node* value);
-  Node* SmiToWord32(Node* value);
-
-  // Smi operations.
-  Node* SmiAdd(Node* a, Node* b);
-  Node* SmiAddWithOverflow(Node* a, Node* b);
-  Node* SmiSub(Node* a, Node* b);
-  Node* SmiSubWithOverflow(Node* a, Node* b);
-  Node* SmiEqual(Node* a, Node* b);
-  Node* SmiLessThan(Node* a, Node* b);
-  Node* SmiLessThanOrEqual(Node* a, Node* b);
-  Node* SmiMin(Node* a, Node* b);
-
-  // Load a value from the root array.
-  Node* LoadRoot(Heap::RootListIndex root_index);
-
-  // Check a value for smi-ness
-  Node* WordIsSmi(Node* a);
-
-  // Check that the value is a positive smi.
-  Node* WordIsPositiveSmi(Node* a);
-
-  // Load an object pointer from a buffer that isn't in the heap.
-  Node* LoadBufferObject(Node* buffer, int offset,
-                         MachineType rep = MachineType::AnyTagged());
-  // Load a field from an object on the heap.
-  Node* LoadObjectField(Node* object, int offset,
-                        MachineType rep = MachineType::AnyTagged());
-  // Load the floating point value of a HeapNumber.
-  Node* LoadHeapNumberValue(Node* object);
-  // Store the floating point value of a HeapNumber.
-  Node* StoreHeapNumberValue(Node* object, Node* value);
-  // Truncate the floating point value of a HeapNumber to an Int32.
-  Node* TruncateHeapNumberValueToWord32(Node* object);
-  // Load the bit field of a Map.
-  Node* LoadMapBitField(Node* map);
-  // Load bit field 2 of a map.
-  Node* LoadMapBitField2(Node* map);
-  // Load bit field 3 of a map.
-  Node* LoadMapBitField3(Node* map);
-  // Load the instance type of a map.
-  Node* LoadMapInstanceType(Node* map);
-  // Load the instance descriptors of a map.
-  Node* LoadMapDescriptors(Node* map);
-
-  // Load the hash field of a name.
-  Node* LoadNameHash(Node* name);
-
-  // Load an array element from a FixedArray.
-  Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
-                                        int additional_offset = 0);
-  Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
-                                      int additional_offset = 0);
-  Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
-
-  // Allocate an object of the given size.
-  Node* Allocate(int size, AllocationFlags flags = kNone);
-  // Allocate a HeapNumber without initializing its value.
-  Node* AllocateHeapNumber();
-  // Allocate a HeapNumber with a specific value.
-  Node* AllocateHeapNumberWithValue(Node* value);
-
-  // Store an array element to a FixedArray.
-  Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
-                                             Node* value);
-  // Load the Map of a HeapObject.
-  Node* LoadMap(Node* object);
-  // Store the Map of a HeapObject.
-  Node* StoreMapNoWriteBarrier(Node* object, Node* map);
-  // Load the instance type of a HeapObject.
-  Node* LoadInstanceType(Node* object);
-
-  // Load the elements backing store of a JSObject.
-  Node* LoadElements(Node* object);
-  // Load the length of a fixed array base instance.
-  Node* LoadFixedArrayBaseLength(Node* array);
-
-  // Returns a node with the decoded value of the bit field |T| in |word32|.
-  template <typename T>
-  Node* BitFieldDecode(Node* word32) {
-    return BitFieldDecode(word32, T::kShift, T::kMask);
-  }
-
-  Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
-
-  // Conversions.
-  Node* ChangeFloat64ToTagged(Node* value);
-  Node* ChangeInt32ToTagged(Node* value);
-  Node* TruncateTaggedToFloat64(Node* context, Node* value);
-  Node* TruncateTaggedToWord32(Node* context, Node* value);
-
-  // Branching helpers.
-  // TODO(danno): Can we be more cleverish wrt. edge-split?
-  void BranchIf(Node* condition, Label* if_true, Label* if_false);
-
-#define BRANCH_HELPER(name)                                                \
-  void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
-    BranchIf(name(a, b), if_true, if_false);                               \
-  }
-  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
-#undef BRANCH_HELPER
-
-  void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
-    BranchIf(SmiLessThan(a, b), if_true, if_false);
-  }
-
-  void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
-                                  Label* if_false) {
-    BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
-  }
-
-  void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
-    BranchIfFloat64Equal(value, value, if_false, if_true);
-  }
-
-  // Helpers which delegate to RawMachineAssembler.
-  Factory* factory() const;
-  Isolate* isolate() const;
-  Zone* zone() const;
-
- protected:
-  // Protected helpers which delegate to RawMachineAssembler.
-  Graph* graph() const;
-
-  // Enables subclasses to perform operations before and after a call.
-  virtual void CallPrologue();
-  virtual void CallEpilogue();
-
- private:
-  friend class CodeStubAssemblerTester;
-
-  CodeStubAssembler(Isolate* isolate, Zone* zone,
-                    CallDescriptor* call_descriptor, Code::Flags flags,
-                    const char* name);
-
-  Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-  Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-
-  Node* SmiShiftBitsConstant();
-
-  Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
-                           Node* top_address, Node* limit_address);
-  Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
-                             Node* top_address, Node* limit_address);
-
-  base::SmartPointer<RawMachineAssembler> raw_assembler_;
-  Code::Flags flags_;
-  const char* name_;
-  bool code_generated_;
-  ZoneVector<Variable::Impl*> variables_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
-
-class CodeStubAssembler::Label {
- public:
-  enum Type { kDeferred, kNonDeferred };
-
-  explicit Label(CodeStubAssembler* assembler,
-                 CodeStubAssembler::Label::Type type =
-                     CodeStubAssembler::Label::kNonDeferred)
-      : CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
-  Label(CodeStubAssembler* assembler,
-        CodeStubAssembler::Variable* merged_variable,
-        CodeStubAssembler::Label::Type type =
-            CodeStubAssembler::Label::kNonDeferred)
-      : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
-  Label(CodeStubAssembler* assembler, int merged_variable_count,
-        CodeStubAssembler::Variable** merged_variables,
-        CodeStubAssembler::Label::Type type =
-            CodeStubAssembler::Label::kNonDeferred);
-  ~Label() {}
-
- private:
-  friend class CodeStubAssembler;
-
-  void Bind();
-  void MergeVariables();
-
-  bool bound_;
-  size_t merge_count_;
-  CodeStubAssembler* assembler_;
-  RawMachineLabel* label_;
-  // Map of variables that need to be merged to their phi nodes (or placeholders
-  // for those phis).
-  std::map<Variable::Impl*, Node*> variable_phis_;
-  // Map of variables to the list of value nodes that have been added from each
-  // merge path in their order of merging.
-  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_CODE_STUB_ASSEMBLER_H_
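
Note: the BRANCH_HELPER macro deleted above stamped out one branching wrapper
per comparison in CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST. A minimal sketch
of the expansion for one entry (Float64Equal is assumed to be in that list,
since BranchIfFloat64IsNaN above relies on BranchIfFloat64Equal):

  void BranchIfFloat64Equal(Node* a, Node* b, Label* if_true, Label* if_false) {
    BranchIf(Float64Equal(a, b), if_true, if_false);
  }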
diff --git a/src/compiler/common-node-cache.cc b/src/compiler/common-node-cache.cc
index a0ae6e8..fa4ca34 100644
--- a/src/compiler/common-node-cache.cc
+++ b/src/compiler/common-node-cache.cc
@@ -17,7 +17,7 @@
 
 
 Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
-  return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
+  return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
 }
 
 
@@ -29,6 +29,8 @@
   external_constants_.GetCachedNodes(nodes);
   number_constants_.GetCachedNodes(nodes);
   heap_constants_.GetCachedNodes(nodes);
+  relocatable_int32_constants_.GetCachedNodes(nodes);
+  relocatable_int64_constants_.GetCachedNodes(nodes);
 }
 
 }  // namespace compiler
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
index 720bc15..cee0c4e 100644
--- a/src/compiler/common-node-cache.h
+++ b/src/compiler/common-node-cache.h
@@ -52,6 +52,14 @@
 
   Node** FindHeapConstant(Handle<HeapObject> value);
 
+  Node** FindRelocatableInt32Constant(int32_t value) {
+    return relocatable_int32_constants_.Find(zone(), value);
+  }
+
+  Node** FindRelocatableInt64Constant(int64_t value) {
+    return relocatable_int64_constants_.Find(zone(), value);
+  }
+
   // Return all nodes from the cache.
   void GetCachedNodes(ZoneVector<Node*>* nodes);
 
@@ -65,6 +73,8 @@
   IntPtrNodeCache external_constants_;
   Int64NodeCache number_constants_;
   IntPtrNodeCache heap_constants_;
+  Int32NodeCache relocatable_int32_constants_;
+  Int64NodeCache relocatable_int64_constants_;
   Zone* const zone_;
 
   DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
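
Note: the new Find* accessors return a pointer to a cache slot rather than a
node, so callers fill the slot on a miss. A minimal sketch of the intended
find-or-create pattern (the surrounding graph and builder objects are
assumptions here, not part of this diff):

  Node** loc = cache->FindRelocatableInt32Constant(value);
  if (*loc == nullptr) {
    *loc = graph->NewNode(common->RelocatableInt32Constant(value, rmode));
  }
  return *loc;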
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 22e16a2..2f48683 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -27,10 +27,6 @@
       Int32Matcher mcond(cond);
       return mcond.Value() ? Decision::kTrue : Decision::kFalse;
     }
-    case IrOpcode::kInt64Constant: {
-      Int64Matcher mcond(cond);
-      return mcond.Value() ? Decision::kTrue : Decision::kFalse;
-    }
     case IrOpcode::kHeapConstant: {
       HeapObjectMatcher mcond(cond);
       return mcond.Value()->BooleanValue() ? Decision::kTrue : Decision::kFalse;
@@ -70,8 +66,6 @@
       return ReduceReturn(node);
     case IrOpcode::kSelect:
       return ReduceSelect(node);
-    case IrOpcode::kGuard:
-      return ReduceGuard(node);
     default:
       break;
   }
@@ -396,16 +390,6 @@
 }
 
 
-Reduction CommonOperatorReducer::ReduceGuard(Node* node) {
-  DCHECK_EQ(IrOpcode::kGuard, node->opcode());
-  Node* const input = NodeProperties::GetValueInput(node, 0);
-  Type* const input_type = NodeProperties::GetTypeOrAny(input);
-  Type* const guard_type = OpParameter<Type*>(node);
-  if (input_type->Is(guard_type)) return Replace(input);
-  return NoChange();
-}
-
-
 Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
                                         Node* a) {
   node->ReplaceInput(0, a);
diff --git a/src/compiler/common-operator-reducer.h b/src/compiler/common-operator-reducer.h
index 49d9f1d..b7aeeb7 100644
--- a/src/compiler/common-operator-reducer.h
+++ b/src/compiler/common-operator-reducer.h
@@ -36,7 +36,6 @@
   Reduction ReducePhi(Node* node);
   Reduction ReduceReturn(Node* node);
   Reduction ReduceSelect(Node* node);
-  Reduction ReduceGuard(Node* node);
 
   Reduction Change(Node* node, Operator const* op, Node* a);
   Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 3bb1b34..d3f6972 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -98,6 +98,11 @@
   return OpParameter<SelectParameters>(op);
 }
 
+CallDescriptor const* CallDescriptorOf(const Operator* const op) {
+  DCHECK(op->opcode() == IrOpcode::kCall ||
+         op->opcode() == IrOpcode::kTailCall);
+  return OpParameter<CallDescriptor const*>(op);
+}
 
 size_t ProjectionIndexOf(const Operator* const op) {
   DCHECK_EQ(IrOpcode::kProjection, op->opcode());
@@ -142,6 +147,26 @@
   return os;
 }
 
+bool operator==(RelocatablePtrConstantInfo const& lhs,
+                RelocatablePtrConstantInfo const& rhs) {
+  return lhs.rmode() == rhs.rmode() && lhs.value() == rhs.value() &&
+         lhs.type() == rhs.type();
+}
+
+bool operator!=(RelocatablePtrConstantInfo const& lhs,
+                RelocatablePtrConstantInfo const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(RelocatablePtrConstantInfo const& p) {
+  return base::hash_combine(p.value(), p.rmode(), p.type());
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         RelocatablePtrConstantInfo const& p) {
+  return os << p.value() << "|" << p.rmode() << "|" << p.type();
+}
+
 #define CACHED_OP_LIST(V)                                    \
   V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)             \
   V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1)     \
@@ -154,6 +179,7 @@
   V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)         \
   V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)   \
   V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)     \
+  V(CheckPoint, Operator::kKontrol, 1, 1, 1, 0, 1, 0)        \
   V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0)       \
   V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
 
@@ -668,6 +694,23 @@
       value);                                         // parameter
 }
 
+const Operator* CommonOperatorBuilder::RelocatableInt32Constant(
+    int32_t value, RelocInfo::Mode rmode) {
+  return new (zone()) Operator1<RelocatablePtrConstantInfo>(  // --
+      IrOpcode::kRelocatableInt32Constant, Operator::kPure,   // opcode
+      "RelocatableInt32Constant",                             // name
+      0, 0, 0, 1, 0, 0,                                       // counts
+      RelocatablePtrConstantInfo(value, rmode));              // parameter
+}
+
+const Operator* CommonOperatorBuilder::RelocatableInt64Constant(
+    int64_t value, RelocInfo::Mode rmode) {
+  return new (zone()) Operator1<RelocatablePtrConstantInfo>(  // --
+      IrOpcode::kRelocatableInt64Constant, Operator::kPure,   // opcode
+      "RelocatableInt64Constant",                             // name
+      0, 0, 0, 1, 0, 0,                                       // counts
+      RelocatablePtrConstantInfo(value, rmode));              // parameter
+}
 
 const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
                                               BranchHint hint) {
@@ -717,24 +760,6 @@
 }
 
 
-const Operator* CommonOperatorBuilder::Guard(Type* type) {
-  return new (zone()) Operator1<Type*>(      // --
-      IrOpcode::kGuard, Operator::kKontrol,  // opcode
-      "Guard",                               // name
-      1, 0, 1, 1, 0, 0,                      // counts
-      type);                                 // parameter
-}
-
-
-const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
-  DCHECK(arguments > 1);                      // Disallow empty/singleton sets.
-  return new (zone()) Operator(               // --
-      IrOpcode::kEffectSet, Operator::kPure,  // opcode
-      "EffectSet",                            // name
-      0, arguments, 0, 0, 1, 0);              // counts
-}
-
-
 const Operator* CommonOperatorBuilder::StateValues(int arguments) {
   switch (arguments) {
 #define CACHED_STATE_VALUES(arguments) \
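
Note: in the Operator and Operator1 constructors above, the six counts read as
(value inputs, effect inputs, control inputs, value outputs, effect outputs,
control outputs). A sketch of the new entries under that convention (the
interpretation, not the numbers, is the editorial assumption):

  // RelocatableInt32Constant: 0, 0, 0, 1, 0, 0
  //   -> pure; no inputs, a single value output.
  // CheckPoint:               1, 1, 1, 0, 1, 0
  //   -> takes a value, an effect and a control input and produces only an
  //      effect, i.e. it sits on the effect chain, which is why the new
  //      EffectControlLinearizer below can simply unlink it.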
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 7c59f47..c2a7a37 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -5,17 +5,13 @@
 #ifndef V8_COMPILER_COMMON_OPERATOR_H_
 #define V8_COMPILER_COMMON_OPERATOR_H_
 
+#include "src/assembler.h"
 #include "src/compiler/frame-states.h"
 #include "src/machine-type.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
-
-// Forward declarations.
-class ExternalReference;
-class Type;
-
 namespace compiler {
 
 // Forward declarations.
@@ -88,6 +84,7 @@
 
 SelectParameters const& SelectParametersOf(const Operator* const);
 
+CallDescriptor const* CallDescriptorOf(const Operator* const);
 
 size_t ProjectionIndexOf(const Operator* const);
 
@@ -114,6 +111,31 @@
 int ParameterIndexOf(const Operator* const);
 const ParameterInfo& ParameterInfoOf(const Operator* const);
 
+class RelocatablePtrConstantInfo final {
+ public:
+  enum Type { kInt32, kInt64 };
+
+  RelocatablePtrConstantInfo(int32_t value, RelocInfo::Mode rmode)
+      : value_(value), rmode_(rmode), type_(kInt32) {}
+  RelocatablePtrConstantInfo(int64_t value, RelocInfo::Mode rmode)
+      : value_(value), rmode_(rmode), type_(kInt64) {}
+
+  intptr_t value() const { return value_; }
+  RelocInfo::Mode rmode() const { return rmode_; }
+  Type type() const { return type_; }
+
+ private:
+  intptr_t value_;
+  RelocInfo::Mode rmode_;
+  Type type_;
+};
+
+bool operator==(RelocatablePtrConstantInfo const& lhs,
+                RelocatablePtrConstantInfo const& rhs);
+bool operator!=(RelocatablePtrConstantInfo const& lhs,
+                RelocatablePtrConstantInfo const& rhs);
+std::ostream& operator<<(std::ostream&, RelocatablePtrConstantInfo const&);
+size_t hash_value(RelocatablePtrConstantInfo const& p);
 
 // Interface for building common operators that can be used at any level of IR,
 // including JavaScript, mid-level, and low-level.
@@ -155,12 +177,16 @@
   const Operator* NumberConstant(volatile double);
   const Operator* HeapConstant(const Handle<HeapObject>&);
 
+  const Operator* RelocatableInt32Constant(int32_t value,
+                                           RelocInfo::Mode rmode);
+  const Operator* RelocatableInt64Constant(int64_t value,
+                                           RelocInfo::Mode rmode);
+
   const Operator* Select(MachineRepresentation, BranchHint = BranchHint::kNone);
   const Operator* Phi(MachineRepresentation representation,
                       int value_input_count);
   const Operator* EffectPhi(int effect_input_count);
-  const Operator* EffectSet(int arguments);
-  const Operator* Guard(Type* type);
+  const Operator* CheckPoint();
   const Operator* BeginRegion();
   const Operator* FinishRegion();
   const Operator* StateValues(int arguments);
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
new file mode 100644
index 0000000..716723b
--- /dev/null
+++ b/src/compiler/effect-control-linearizer.cc
@@ -0,0 +1,983 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/effect-control-linearizer.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+EffectControlLinearizer::EffectControlLinearizer(JSGraph* js_graph,
+                                                 Schedule* schedule,
+                                                 Zone* temp_zone)
+    : js_graph_(js_graph), schedule_(schedule), temp_zone_(temp_zone) {}
+
+Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
+CommonOperatorBuilder* EffectControlLinearizer::common() const {
+  return js_graph_->common();
+}
+SimplifiedOperatorBuilder* EffectControlLinearizer::simplified() const {
+  return js_graph_->simplified();
+}
+MachineOperatorBuilder* EffectControlLinearizer::machine() const {
+  return js_graph_->machine();
+}
+
+namespace {
+
+struct BlockEffectControlData {
+  Node* current_effect = nullptr;   // Effect at block exit.
+  Node* current_control = nullptr;  // Control at block exit.
+};
+
+// Effect phis that need to be updated after the first pass.
+struct PendingEffectPhi {
+  Node* effect_phi;
+  BasicBlock* block;
+
+  PendingEffectPhi(Node* effect_phi, BasicBlock* block)
+      : effect_phi(effect_phi), block(block) {}
+};
+
+void UpdateEffectPhi(Node* node, BasicBlock* block,
+                     ZoneVector<BlockEffectControlData>* block_effects) {
+  // Update all inputs to an effect phi with the effects from the given
+  // block->effect map.
+  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+  DCHECK_EQ(node->op()->EffectInputCount(), block->PredecessorCount());
+  for (int i = 0; i < node->op()->EffectInputCount(); i++) {
+    Node* input = node->InputAt(i);
+    BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
+    Node* input_effect =
+        (*block_effects)[predecessor->rpo_number()].current_effect;
+    if (input != input_effect) {
+      node->ReplaceInput(i, input_effect);
+    }
+  }
+}
+
+void UpdateBlockControl(BasicBlock* block,
+                        ZoneVector<BlockEffectControlData>* block_effects) {
+  Node* control = block->NodeAt(0);
+  DCHECK(NodeProperties::IsControl(control));
+
+  // Do not rewire the end node.
+  if (control->opcode() == IrOpcode::kEnd) return;
+
+  // Update all inputs to the given control node with the correct control.
+  DCHECK_EQ(control->op()->ControlInputCount(), block->PredecessorCount());
+  for (int i = 0; i < control->op()->ControlInputCount(); i++) {
+    Node* input = NodeProperties::GetControlInput(control, i);
+    BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
+    Node* input_control =
+        (*block_effects)[predecessor->rpo_number()].current_control;
+    if (input != input_control) {
+      NodeProperties::ReplaceControlInput(control, input_control, i);
+    }
+  }
+}
+
+bool HasIncomingBackEdges(BasicBlock* block) {
+  for (BasicBlock* pred : block->predecessors()) {
+    if (pred->rpo_number() >= block->rpo_number()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void RemoveRegionNode(Node* node) {
+  DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
+         IrOpcode::kBeginRegion == node->opcode());
+  // Update the value/context uses to the value input of the finish node and
+  // the effect uses to the effect input.
+  for (Edge edge : node->use_edges()) {
+    DCHECK(!edge.from()->IsDead());
+    if (NodeProperties::IsEffectEdge(edge)) {
+      edge.UpdateTo(NodeProperties::GetEffectInput(node));
+    } else {
+      DCHECK(!NodeProperties::IsControlEdge(edge));
+      DCHECK(!NodeProperties::IsFrameStateEdge(edge));
+      edge.UpdateTo(node->InputAt(0));
+    }
+  }
+  node->Kill();
+}
+
+}  // namespace
+
+void EffectControlLinearizer::Run() {
+  ZoneVector<BlockEffectControlData> block_effects(temp_zone());
+  ZoneVector<PendingEffectPhi> pending_effect_phis(temp_zone());
+  ZoneVector<BasicBlock*> pending_block_controls(temp_zone());
+  block_effects.resize(schedule()->RpoBlockCount());
+  NodeVector inputs_buffer(temp_zone());
+
+  for (BasicBlock* block : *(schedule()->rpo_order())) {
+    size_t instr = 0;
+
+    // The control node should be the first.
+    Node* control = block->NodeAt(instr);
+    DCHECK(NodeProperties::IsControl(control));
+    // Update the control inputs.
+    if (HasIncomingBackEdges(block)) {
+      // If there are back edges, we need to update later because we have not
+      // computed the control yet. This should only happen for loops.
+      DCHECK_EQ(IrOpcode::kLoop, control->opcode());
+      pending_block_controls.push_back(block);
+    } else {
+      // If there are no back edges, we can update now.
+      UpdateBlockControl(block, &block_effects);
+    }
+    instr++;
+
+    // Iterate over the phis and update the effect phis.
+    Node* effect = nullptr;
+    Node* terminate = nullptr;
+    for (; instr < block->NodeCount(); instr++) {
+      Node* node = block->NodeAt(instr);
+      // Only go through the phis and effect phis.
+      if (node->opcode() == IrOpcode::kEffectPhi) {
+        // There should be at most one effect phi in a block.
+        DCHECK_NULL(effect);
+        // IfException blocks should not have effect phis.
+        DCHECK_NE(IrOpcode::kIfException, control->opcode());
+        effect = node;
+
+        // Make sure we update the inputs to the incoming blocks' effects.
+        if (HasIncomingBackEdges(block)) {
+          // In case of loops, we do not update the effect phi immediately
+          // because the back predecessor has not been handled yet. We just
+          // record the effect phi for later processing.
+          pending_effect_phis.push_back(PendingEffectPhi(node, block));
+        } else {
+          UpdateEffectPhi(node, block, &block_effects);
+        }
+      } else if (node->opcode() == IrOpcode::kPhi) {
+        // Just skip phis.
+      } else if (node->opcode() == IrOpcode::kTerminate) {
+        DCHECK(terminate == nullptr);
+        terminate = node;
+      } else {
+        break;
+      }
+    }
+
+    if (effect == nullptr) {
+      // There was no effect phi.
+      DCHECK(!HasIncomingBackEdges(block));
+      if (block == schedule()->start()) {
+        // Start block => effect is start.
+        DCHECK_EQ(graph()->start(), control);
+        effect = graph()->start();
+      } else if (control->opcode() == IrOpcode::kEnd) {
+        // End block is just a dummy, no effect needed.
+        DCHECK_EQ(BasicBlock::kNone, block->control());
+        DCHECK_EQ(1u, block->size());
+        effect = nullptr;
+      } else {
+        // If all the predecessors have the same effect, we can use it
+        // as our current effect.
+        int rpo_number = block->PredecessorAt(0)->rpo_number();
+        effect = block_effects[rpo_number].current_effect;
+        for (size_t i = 1; i < block->PredecessorCount(); i++) {
+          int rpo_number = block->PredecessorAt(i)->rpo_number();
+          if (block_effects[rpo_number].current_effect != effect) {
+            effect = nullptr;
+            break;
+          }
+        }
+        if (effect == nullptr) {
+          DCHECK_NE(IrOpcode::kIfException, control->opcode());
+          // The input blocks do not have the same effect. We have
+          // to create an effect phi node.
+          inputs_buffer.clear();
+          inputs_buffer.resize(block->PredecessorCount(), graph()->start());
+          inputs_buffer.push_back(control);
+          effect = graph()->NewNode(
+              common()->EffectPhi(static_cast<int>(block->PredecessorCount())),
+              static_cast<int>(inputs_buffer.size()), &(inputs_buffer.front()));
+          // Let us update the effect phi node later.
+          pending_effect_phis.push_back(PendingEffectPhi(effect, block));
+        } else if (control->opcode() == IrOpcode::kIfException) {
+          // The IfException is connected into the effect chain, so we need
+          // to update the effect here.
+          NodeProperties::ReplaceEffectInput(control, effect);
+          effect = control;
+        }
+      }
+    }
+
+    // Fix up the Terminate node.
+    if (terminate != nullptr) {
+      NodeProperties::ReplaceEffectInput(terminate, effect);
+    }
+
+    // Process the ordinary instructions.
+    for (; instr < block->NodeCount(); instr++) {
+      Node* node = block->NodeAt(instr);
+      ProcessNode(node, &effect, &control);
+    }
+
+    switch (block->control()) {
+      case BasicBlock::kGoto:
+      case BasicBlock::kNone:
+        break;
+
+      case BasicBlock::kCall:
+      case BasicBlock::kTailCall:
+      case BasicBlock::kBranch:
+      case BasicBlock::kSwitch:
+      case BasicBlock::kReturn:
+      case BasicBlock::kDeoptimize:
+      case BasicBlock::kThrow:
+        ProcessNode(block->control_input(), &effect, &control);
+        break;
+    }
+
+    // Store the effect for later use.
+    block_effects[block->rpo_number()].current_effect = effect;
+    block_effects[block->rpo_number()].current_control = control;
+  }
+
+  // Update the incoming edges of the effect phis that could not be processed
+  // during the first pass (because they could have incoming back edges).
+  for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
+    UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
+                    &block_effects);
+  }
+  for (BasicBlock* pending_block_control : pending_block_controls) {
+    UpdateBlockControl(pending_block_control, &block_effects);
+  }
+}
+
+namespace {
+
+void TryScheduleCallIfSuccess(Node* node, Node** control) {
+  // Schedule the call's IfSuccess node if there is no exception use.
+  if (!NodeProperties::IsExceptionalCall(node)) {
+    for (Edge edge : node->use_edges()) {
+      if (NodeProperties::IsControlEdge(edge) &&
+          edge.from()->opcode() == IrOpcode::kIfSuccess) {
+        *control = edge.from();
+      }
+    }
+  }
+}
+
+}  // namespace
+
+void EffectControlLinearizer::ProcessNode(Node* node, Node** effect,
+                                          Node** control) {
+  // If the node needs to be wired into the effect/control chain, do this
+  // here.
+  if (TryWireInStateEffect(node, effect, control)) {
+    return;
+  }
+
+  // Remove the end markers of an 'atomic' allocation region because the
+  // region should be wired in now.
+  if (node->opcode() == IrOpcode::kFinishRegion ||
+      node->opcode() == IrOpcode::kBeginRegion) {
+    // Update the value uses to the value input of the finish node and
+    // the effect uses to the effect input.
+    return RemoveRegionNode(node);
+  }
+
+  // Special treatment for CheckPoint nodes.
+  // TODO(epertoso): Pickup the current frame state.
+  if (node->opcode() == IrOpcode::kCheckPoint) {
+    // Unlink the check point; effect uses will be updated to the incoming
+    // effect that is passed.
+    node->Kill();
+    return;
+  }
+
+  if (node->opcode() == IrOpcode::kIfSuccess) {
+    // We always schedule IfSuccess with its call, so skip it here.
+    DCHECK_EQ(IrOpcode::kCall, node->InputAt(0)->opcode());
+    // The IfSuccess node should not belong to an exceptional call node
+    // because such IfSuccess nodes should only start a basic block (and
+    // basic block start nodes are not handled in the ProcessNode method).
+    DCHECK(!NodeProperties::IsExceptionalCall(node->InputAt(0)));
+    return;
+  }
+
+  // If the node takes an effect, replace with the current one.
+  if (node->op()->EffectInputCount() > 0) {
+    DCHECK_EQ(1, node->op()->EffectInputCount());
+    Node* input_effect = NodeProperties::GetEffectInput(node);
+
+    if (input_effect != *effect) {
+      NodeProperties::ReplaceEffectInput(node, *effect);
+    }
+
+    // If the node produces an effect, update our current effect. (However,
+    // ignore new effect chains started with ValueEffect.)
+    if (node->op()->EffectOutputCount() > 0) {
+      DCHECK_EQ(1, node->op()->EffectOutputCount());
+      *effect = node;
+    }
+  } else {
+    // A new effect chain is only started with a Start or ValueEffect node.
+    DCHECK(node->op()->EffectOutputCount() == 0 ||
+           node->opcode() == IrOpcode::kStart);
+  }
+
+  // Rewire control inputs.
+  for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+    NodeProperties::ReplaceControlInput(node, *control, i);
+  }
+  // Update the current control and wire IfSuccess right after calls.
+  if (node->op()->ControlOutputCount() > 0) {
+    *control = node;
+    if (node->opcode() == IrOpcode::kCall) {
+      // Schedule the call's IfSuccess node (if there is no exception use).
+      TryScheduleCallIfSuccess(node, control);
+    }
+  }
+}
+
+bool EffectControlLinearizer::TryWireInStateEffect(Node* node, Node** effect,
+                                                   Node** control) {
+  ValueEffectControl state(nullptr, nullptr, nullptr);
+  switch (node->opcode()) {
+    case IrOpcode::kTypeGuard:
+      state = LowerTypeGuard(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeBitToTagged:
+      state = LowerChangeBitToTagged(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeInt31ToTaggedSigned:
+      state = LowerChangeInt31ToTaggedSigned(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeInt32ToTagged:
+      state = LowerChangeInt32ToTagged(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeUint32ToTagged:
+      state = LowerChangeUint32ToTagged(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeFloat64ToTagged:
+      state = LowerChangeFloat64ToTagged(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeTaggedSignedToInt32:
+      state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeTaggedToBit:
+      state = LowerChangeTaggedToBit(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeTaggedToInt32:
+      state = LowerChangeTaggedToInt32(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeTaggedToUint32:
+      state = LowerChangeTaggedToUint32(node, *effect, *control);
+      break;
+    case IrOpcode::kChangeTaggedToFloat64:
+      state = LowerChangeTaggedToFloat64(node, *effect, *control);
+      break;
+    case IrOpcode::kTruncateTaggedToWord32:
+      state = LowerTruncateTaggedToWord32(node, *effect, *control);
+      break;
+    case IrOpcode::kObjectIsCallable:
+      state = LowerObjectIsCallable(node, *effect, *control);
+      break;
+    case IrOpcode::kObjectIsNumber:
+      state = LowerObjectIsNumber(node, *effect, *control);
+      break;
+    case IrOpcode::kObjectIsReceiver:
+      state = LowerObjectIsReceiver(node, *effect, *control);
+      break;
+    case IrOpcode::kObjectIsSmi:
+      state = LowerObjectIsSmi(node, *effect, *control);
+      break;
+    case IrOpcode::kObjectIsString:
+      state = LowerObjectIsString(node, *effect, *control);
+      break;
+    case IrOpcode::kObjectIsUndetectable:
+      state = LowerObjectIsUndetectable(node, *effect, *control);
+      break;
+    default:
+      return false;
+  }
+  NodeProperties::ReplaceUses(node, state.value);
+  *effect = state.effect;
+  *control = state.control;
+  return true;
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTypeGuard(Node* node, Node* effect,
+                                        Node* control) {
+  Node* value = node->InputAt(0);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
+                                                    Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
+  Node* check_same = graph()->NewNode(
+      machine()->Float64Equal(), value,
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+  Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
+
+  Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
+  Node* vsmi;
+  Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
+
+  // Check if {value} is -0.
+  Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+                                      jsgraph()->Int32Constant(0));
+  Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check_zero, if_smi);
+
+  Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+  Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+  Node* check_negative = graph()->NewNode(
+      machine()->Int32LessThan(),
+      graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+      jsgraph()->Int32Constant(0));
+  Node* branch_negative = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                           check_negative, if_zero);
+
+  Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
+  Node* if_notnegative = graph()->NewNode(common()->IfFalse(), branch_negative);
+
+  // We need to create a box for negative 0.
+  if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
+  if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+
+  // On 64-bit machines we can just wrap the 32-bit integer in a smi; on
+  // 32-bit machines we need to deal with potential overflow and fall back
+  // to boxing.
+  if (machine()->Is64()) {
+    vsmi = ChangeInt32ToSmi(value32);
+  } else {
+    Node* smi_tag =
+        graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
+
+    Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
+    Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                        check_ovf, if_smi);
+
+    Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
+    if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
+
+    if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
+    vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
+  }
+
+  // Allocate the box for the {value}.
+  ValueEffectControl box = AllocateHeapNumberWithValue(value, effect, if_box);
+
+  control = graph()->NewNode(common()->Merge(2), if_smi, box.control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vsmi, box.value, control);
+  effect =
+      graph()->NewNode(common()->EffectPhi(2), effect, box.effect, control);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
+                                                Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* branch = graph()->NewNode(common()->Branch(), value, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->TrueConstant();
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = jsgraph()->FalseConstant();
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vtrue, vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node,
+                                                        Node* effect,
+                                                        Node* control) {
+  Node* value = node->InputAt(0);
+  value = ChangeInt32ToSmi(value);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node, Node* effect,
+                                                  Node* control) {
+  Node* value = node->InputAt(0);
+
+  if (machine()->Is64()) {
+    return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
+  }
+
+  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
+
+  Node* ovf = graph()->NewNode(common()->Projection(1), add);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  ValueEffectControl alloc =
+      AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = graph()->NewNode(common()->Projection(0), add);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                               alloc.value, vfalse, merge);
+  Node* ephi =
+      graph()->NewNode(common()->EffectPhi(2), alloc.effect, effect, merge);
+
+  return ValueEffectControl(phi, ephi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node, Node* effect,
+                                                   Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
+                                 SmiMaxValueConstant());
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = ChangeUint32ToSmi(value);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  ValueEffectControl alloc = AllocateHeapNumberWithValue(
+      ChangeUint32ToFloat64(value), effect, if_false);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, alloc.control);
+  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                               vtrue, alloc.value, merge);
+  Node* ephi =
+      graph()->NewNode(common()->EffectPhi(2), effect, alloc.effect, merge);
+
+  return ValueEffectControl(phi, ephi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node,
+                                                        Node* effect,
+                                                        Node* control) {
+  Node* value = node->InputAt(0);
+  value = ChangeSmiToInt32(value);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
+                                                Node* control) {
+  Node* value = node->InputAt(0);
+  value = graph()->NewNode(machine()->WordEqual(), value,
+                           jsgraph()->TrueConstant());
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
+                                                  Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = ChangeSmiToInt32(value);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+    vfalse = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+        efalse, if_false);
+    vfalse = graph()->NewNode(machine()->ChangeFloat64ToInt32(), vfalse);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                           vtrue, vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node, Node* effect,
+                                                   Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = ChangeSmiToInt32(value);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+    vfalse = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+        efalse, if_false);
+    vfalse = graph()->NewNode(machine()->ChangeFloat64ToUint32(), vfalse);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                           vtrue, vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
+                                                    Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue;
+  {
+    vtrue = ChangeSmiToInt32(value);
+    vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
+  }
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+    vfalse = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+        efalse, if_false);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue, vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
+                                                     Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = ChangeSmiToInt32(value);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+    vfalse = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+        efalse, if_false);
+    vfalse = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                           vtrue, vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
+                                               Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = jsgraph()->Int32Constant(0);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    Node* value_map = efalse =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         value, efalse, if_false);
+    Node* value_bit_field = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+        efalse, if_false);
+    vfalse = graph()->NewNode(
+        machine()->Word32Equal(),
+        jsgraph()->Int32Constant(1 << Map::kIsCallable),
+        graph()->NewNode(
+            machine()->Word32And(), value_bit_field,
+            jsgraph()->Int32Constant((1 << Map::kIsCallable) |
+                                     (1 << Map::kIsUndetectable))));
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+                           vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsNumber(Node* node, Node* effect,
+                                             Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = jsgraph()->Int32Constant(1);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    Node* value_map = efalse =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         value, efalse, if_false);
+    vfalse = graph()->NewNode(machine()->WordEqual(), value_map,
+                              jsgraph()->HeapNumberMapConstant());
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+                           vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsReceiver(Node* node, Node* effect,
+                                               Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = jsgraph()->Int32Constant(0);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    Node* value_map = efalse =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         value, efalse, if_false);
+    Node* value_instance_type = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+        efalse, if_false);
+    vfalse = graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+                              jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+                              value_instance_type);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+                           vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsSmi(Node* node, Node* effect,
+                                          Node* control) {
+  Node* value = node->InputAt(0);
+  value = ObjectIsSmi(value);
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsString(Node* node, Node* effect,
+                                             Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = jsgraph()->Int32Constant(0);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    Node* value_map = efalse =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         value, efalse, if_false);
+    Node* value_instance_type = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+        efalse, if_false);
+    vfalse = graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
+                              jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+                           vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
+                                                   Node* control) {
+  Node* value = node->InputAt(0);
+
+  Node* check = ObjectIsSmi(value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = jsgraph()->Int32Constant(0);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    Node* value_map = efalse =
+        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                         value, efalse, if_false);
+    Node* value_bit_field = efalse = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+        efalse, if_false);
+    vfalse = graph()->NewNode(
+        machine()->Word32Equal(),
+        graph()->NewNode(
+            machine()->Word32Equal(), jsgraph()->Int32Constant(0),
+            graph()->NewNode(
+                machine()->Word32And(), value_bit_field,
+                jsgraph()->Int32Constant(1 << Map::kIsUndetectable))),
+        jsgraph()->Int32Constant(0));
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+                           vfalse, control);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
+                                                     Node* control) {
+  Node* result = effect = graph()->NewNode(
+      simplified()->Allocate(NOT_TENURED),
+      jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
+  effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+                            result, jsgraph()->HeapNumberMapConstant(), effect,
+                            control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
+      value, effect, control);
+  return ValueEffectControl(result, effect, control);
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
+  if (machine()->Is64()) {
+    value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+  }
+  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
+  if (machine()->Is64()) {
+    value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+  }
+  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToFloat64(Node* value) {
+  return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToFloat64(Node* value) {
+  return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+}
+
+Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
+  value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+  if (machine()->Is64()) {
+    value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+  }
+  return value;
+}
+
+Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
+  return graph()->NewNode(
+      machine()->WordEqual(),
+      graph()->NewNode(machine()->WordAnd(), value,
+                       jsgraph()->IntPtrConstant(kSmiTagMask)),
+      jsgraph()->IntPtrConstant(kSmiTag));
+}
+
+Node* EffectControlLinearizer::SmiMaxValueConstant() {
+  return jsgraph()->Int32Constant(Smi::kMaxValue);
+}
+
+Node* EffectControlLinearizer::SmiShiftBitsConstant() {
+  return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
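
Note: the Smi helpers at the bottom of this file encode V8's pointer tagging
with machine operators. A worked example under the usual 64-bit configuration
(kSmiTagSize == 1, kSmiShiftSize == 31, kSmiTag == 0 are assumed defaults):

  // ChangeInt32ToSmi(42):
  //   ChangeInt32ToInt64(42)   -> 0x000000000000002a
  //   WordShl(_, 31 + 1)       -> 0x0000002a00000000   (the tagged Smi)
  // ObjectIsSmi(value) then reduces to testing the low tag bit:
  //   WordEqual(WordAnd(value, kSmiTagMask), kSmiTag)  // mask 1, tag 0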
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
new file mode 100644
index 0000000..7d7f938
--- /dev/null
+++ b/src/compiler/effect-control-linearizer.h
@@ -0,0 +1,108 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
+#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class CommonOperatorBuilder;
+class SimplifiedOperatorBuilder;
+class MachineOperatorBuilder;
+class JSGraph;
+class Graph;
+class Schedule;
+
+class EffectControlLinearizer {
+ public:
+  EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone);
+
+  void Run();
+
+ private:
+  void ProcessNode(Node* node, Node** current_effect, Node** control);
+
+  struct ValueEffectControl {
+    Node* value;
+    Node* effect;
+    Node* control;
+    ValueEffectControl(Node* value, Node* effect, Node* control)
+        : value(value), effect(effect), control(control) {}
+  };
+
+  bool TryWireInStateEffect(Node* node, Node** effect, Node** control);
+  ValueEffectControl LowerTypeGuard(Node* node, Node* effect, Node* control);
+  ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
+                                            Node* control);
+  ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
+                                                    Node* control);
+  ValueEffectControl LowerChangeInt32ToTagged(Node* node, Node* effect,
+                                              Node* control);
+  ValueEffectControl LowerChangeUint32ToTagged(Node* node, Node* effect,
+                                               Node* control);
+  ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
+                                                Node* control);
+  ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
+                                                    Node* control);
+  ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
+                                            Node* control);
+  ValueEffectControl LowerChangeTaggedToInt32(Node* node, Node* effect,
+                                              Node* control);
+  ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
+                                               Node* control);
+  ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
+                                                Node* control);
+  ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
+                                                 Node* control);
+  ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
+                                           Node* control);
+  ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
+                                         Node* control);
+  ValueEffectControl LowerObjectIsReceiver(Node* node, Node* effect,
+                                           Node* control);
+  ValueEffectControl LowerObjectIsSmi(Node* node, Node* effect, Node* control);
+  ValueEffectControl LowerObjectIsString(Node* node, Node* effect,
+                                         Node* control);
+  ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
+                                               Node* control);
+  ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
+                                                 Node* control);
+
+  Node* ChangeInt32ToSmi(Node* value);
+  Node* ChangeUint32ToSmi(Node* value);
+  Node* ChangeInt32ToFloat64(Node* value);
+  Node* ChangeUint32ToFloat64(Node* value);
+  Node* ChangeSmiToInt32(Node* value);
+  Node* ObjectIsSmi(Node* value);
+
+  Node* SmiMaxValueConstant();
+  Node* SmiShiftBitsConstant();
+
+  JSGraph* jsgraph() const { return js_graph_; }
+  Graph* graph() const;
+  Schedule* schedule() const { return schedule_; }
+  Zone* temp_zone() const { return temp_zone_; }
+  CommonOperatorBuilder* common() const;
+  SimplifiedOperatorBuilder* simplified() const;
+  MachineOperatorBuilder* machine() const;
+
+  JSGraph* js_graph_;
+  Schedule* schedule_;
+  Zone* temp_zone_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
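
Note: a minimal sketch of how this pass is meant to be driven (the scheduling
step is an assumption here; only the constructor and Run() come from this
header):

  Schedule* schedule =
      Scheduler::ComputeSchedule(temp_zone, graph, Scheduler::kNoFlags);
  EffectControlLinearizer linearizer(jsgraph, schedule, temp_zone);
  linearizer.Run();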
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index 313b639..8402366 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/escape-analysis-reducer.h"
 
+#include "src/compiler/all-nodes.h"
 #include "src/compiler/js-graph.h"
 #include "src/counters.h"
 
@@ -28,8 +29,7 @@
       escape_analysis_(escape_analysis),
       zone_(zone),
       fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
-      exists_virtual_allocate_(true) {}
-
+      exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
 
 Reduction EscapeAnalysisReducer::Reduce(Node* node) {
   if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
@@ -105,7 +105,7 @@
     fully_reduced_.Add(node->id());
   }
   if (Node* rep = escape_analysis()->GetReplacement(node)) {
-    counters()->turbo_escape_loads_replaced()->Increment();
+    isolate()->counters()->turbo_escape_loads_replaced()->Increment();
     TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
           node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
     ReplaceWithValue(node, rep);
@@ -138,7 +138,7 @@
   }
   if (escape_analysis()->IsVirtual(node)) {
     RelaxEffectsAndControls(node);
-    counters()->turbo_escape_allocs_replaced()->Increment();
+    isolate()->counters()->turbo_escape_allocs_replaced()->Increment();
     TRACE("Removed allocate #%d from effect chain\n", node->id());
     return Changed(node);
   }
@@ -328,40 +328,19 @@
 }
 
 
-Counters* EscapeAnalysisReducer::counters() const {
-  return jsgraph_->isolate()->counters();
-}
-
-
-class EscapeAnalysisVerifier final : public AdvancedReducer {
- public:
-  EscapeAnalysisVerifier(Editor* editor, EscapeAnalysis* escape_analysis)
-      : AdvancedReducer(editor), escape_analysis_(escape_analysis) {}
-
-  Reduction Reduce(Node* node) final {
-    switch (node->opcode()) {
-      case IrOpcode::kAllocate:
-        CHECK(!escape_analysis_->IsVirtual(node));
-        break;
-      default:
-        break;
-    }
-    return NoChange();
-  }
-
- private:
-  EscapeAnalysis* escape_analysis_;
-};
-
 void EscapeAnalysisReducer::VerifyReplacement() const {
 #ifdef DEBUG
-  GraphReducer graph_reducer(zone(), jsgraph()->graph());
-  EscapeAnalysisVerifier verifier(&graph_reducer, escape_analysis());
-  graph_reducer.AddReducer(&verifier);
-  graph_reducer.ReduceGraph();
+  AllNodes all(zone(), jsgraph()->graph());
+  for (Node* node : all.live) {
+    if (node->opcode() == IrOpcode::kAllocate) {
+      CHECK(!escape_analysis_->IsVirtual(node));
+    }
+  }
 #endif  // DEBUG
 }
 
+Isolate* EscapeAnalysisReducer::isolate() const { return jsgraph_->isolate(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
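
The rewritten VerifyReplacement above replaces a full GraphReducer pass with a single walk over the live node set. A toy model of the check, with an illustrative node type standing in for compiler::Node and a plain bool standing in for the escape analysis query:

#include <cassert>
#include <vector>

struct ToyNode {
  bool is_allocate;
  bool is_virtual;  // as a (toy) escape analysis would report it
};

void VerifyReplacement(const std::vector<ToyNode>& live_nodes) {
  // One pass over the live nodes: no allocation may still be virtual.
  for (const ToyNode& node : live_nodes) {
    if (node.is_allocate) assert(!node.is_virtual);
  }
}

int main() {
  VerifyReplacement({{true, false}, {false, true}});  // passes
  return 0;
}
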
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
index 12487b1..ad67479 100644
--- a/src/compiler/escape-analysis-reducer.h
+++ b/src/compiler/escape-analysis-reducer.h
@@ -9,29 +9,22 @@
 #include "src/compiler/escape-analysis.h"
 #include "src/compiler/graph-reducer.h"
 
-
 namespace v8 {
 namespace internal {
-
-// Forward declarations.
-class Counters;
-
-
 namespace compiler {
 
 // Forward declarations.
 class JSGraph;
 
-
 class EscapeAnalysisReducer final : public AdvancedReducer {
  public:
   EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
                         EscapeAnalysis* escape_analysis, Zone* zone);
 
   Reduction Reduce(Node* node) final;
-  void SetExistsVirtualAllocate(bool exists) {
-    exists_virtual_allocate_ = exists;
-  }
+
+  // Verifies that all virtual allocation nodes have been dealt with. Run it
+  // after this reducer has been applied. Has no effect in release mode.
   void VerifyReplacement() const;
 
  private:
@@ -50,12 +43,12 @@
   JSGraph* jsgraph() const { return jsgraph_; }
   EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
   Zone* zone() const { return zone_; }
-  Counters* counters() const;
+  Isolate* isolate() const;
 
   JSGraph* const jsgraph_;
   EscapeAnalysis* escape_analysis_;
   Zone* const zone_;
-  // _visited marks nodes we already processed (allocs, loads, stores)
+  // This bit vector marks nodes we already processed (allocs, loads, stores)
   // and nodes that do not need a visit from ReduceDeoptState etc.
   BitVector fully_reduced_;
   bool exists_virtual_allocate_;
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index b1a12b2..d11c3ab 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -24,7 +24,7 @@
 namespace internal {
 namespace compiler {
 
-using Alias = EscapeStatusAnalysis::Alias;
+typedef NodeId Alias;
 
 #ifdef DEBUG
 #define TRACE(...)                                    \
@@ -35,6 +35,90 @@
 #define TRACE(...)
 #endif
 
+// EscapeStatusAnalysis determines for each allocation whether it escapes.
+class EscapeStatusAnalysis : public ZoneObject {
+ public:
+  enum Status {
+    kUnknown = 0u,
+    kTracked = 1u << 0,
+    kEscaped = 1u << 1,
+    kOnStack = 1u << 2,
+    kVisited = 1u << 3,
+    // A node is dangling if it is a load of some kind, and does not have
+    // an effect successor.
+    kDanglingComputed = 1u << 4,
+    kDangling = 1u << 5,
+    // A node is an effect branch point, if it has more than 2 non-dangling
+    // effect successors.
+    kBranchPointComputed = 1u << 6,
+    kBranchPoint = 1u << 7,
+    kInQueue = 1u << 8
+  };
+  typedef base::Flags<Status, uint16_t> StatusFlags;
+
+  void RunStatusAnalysis();
+
+  bool IsVirtual(Node* node);
+  bool IsEscaped(Node* node);
+  bool IsAllocation(Node* node);
+
+  bool IsInQueue(NodeId id);
+  void SetInQueue(NodeId id, bool on_stack);
+
+  void DebugPrint();
+
+  EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
+                       Zone* zone);
+  void EnqueueForStatusAnalysis(Node* node);
+  bool SetEscaped(Node* node);
+  bool IsEffectBranchPoint(Node* node);
+  bool IsDanglingEffectNode(Node* node);
+  void ResizeStatusVector();
+  size_t GetStatusVectorSize();
+  bool IsVirtual(NodeId id);
+
+  Graph* graph() const { return graph_; }
+  void AssignAliases();
+  Alias GetAlias(NodeId id) const { return aliases_[id]; }
+  const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
+  Alias AliasCount() const { return next_free_alias_; }
+  static const Alias kNotReachable;
+  static const Alias kUntrackable;
+
+  bool IsNotReachable(Node* node);
+
+ private:
+  void Process(Node* node);
+  void ProcessAllocate(Node* node);
+  void ProcessFinishRegion(Node* node);
+  void ProcessStoreField(Node* node);
+  void ProcessStoreElement(Node* node);
+  bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
+    return CheckUsesForEscape(node, node, phi_escaping);
+  }
+  bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
+  void RevisitUses(Node* node);
+  void RevisitInputs(Node* node);
+
+  Alias NextAlias() { return next_free_alias_++; }
+
+  bool HasEntry(Node* node);
+
+  bool IsAllocationPhi(Node* node);
+
+  ZoneVector<Node*> stack_;
+  EscapeAnalysis* object_analysis_;
+  Graph* const graph_;
+  ZoneVector<StatusFlags> status_;
+  Alias next_free_alias_;
+  ZoneVector<Node*> status_stack_;
+  ZoneVector<Alias> aliases_;
+
+  DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
+
 const Alias EscapeStatusAnalysis::kNotReachable =
     std::numeric_limits<Alias>::max();
 const Alias EscapeStatusAnalysis::kUntrackable =
@@ -475,14 +559,11 @@
     : stack_(zone),
       object_analysis_(object_analysis),
       graph_(graph),
-      zone_(zone),
       status_(zone),
       next_free_alias_(0),
       status_stack_(zone),
       aliases_(zone) {}
 
-EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
-
 bool EscapeStatusAnalysis::HasEntry(Node* node) {
   return status_[node->id()] & (kTracked | kEscaped);
 }
@@ -712,6 +793,7 @@
         }
         break;
       case IrOpcode::kSelect:
+      case IrOpcode::kTypeGuard:
         if (SetEscaped(rep)) {
           TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
                 rep->id(), rep->op()->mnemonic(), use->id(),
@@ -721,7 +803,8 @@
         break;
       default:
         if (use->op()->EffectInputCount() == 0 &&
-            uses->op()->EffectInputCount() > 0) {
+            uses->op()->EffectInputCount() > 0 &&
+            !IrOpcode::IsJsOpcode(use->opcode())) {
           TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
                 use->op()->mnemonic());
           UNREACHABLE();
@@ -759,8 +842,9 @@
 
 EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
                                Zone* zone)
-    : status_analysis_(this, graph, zone),
+    : zone_(zone),
       common_(common),
+      status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
       virtual_states_(zone),
       replacements_(zone),
       cache_(nullptr) {}
@@ -769,13 +853,13 @@
 
 void EscapeAnalysis::Run() {
   replacements_.resize(graph()->NodeCount());
-  status_analysis_.AssignAliases();
-  if (status_analysis_.AliasCount() > 0) {
+  status_analysis_->AssignAliases();
+  if (status_analysis_->AliasCount() > 0) {
     cache_ = new (zone()) MergeCache(zone());
     replacements_.resize(graph()->NodeCount());
-    status_analysis_.ResizeStatusVector();
+    status_analysis_->ResizeStatusVector();
     RunObjectAnalysis();
-    status_analysis_.RunStatusAnalysis();
+    status_analysis_->RunStatusAnalysis();
   }
 }
 
@@ -853,11 +937,11 @@
   while (!queue.empty()) {
     Node* node = queue.back();
     queue.pop_back();
-    status_analysis_.SetInQueue(node->id(), false);
+    status_analysis_->SetInQueue(node->id(), false);
     if (Process(node)) {
       for (Edge edge : node->use_edges()) {
         Node* use = edge.from();
-        if (IsNotReachable(use)) {
+        if (status_analysis_->IsNotReachable(use)) {
           continue;
         }
         if (NodeProperties::IsEffectEdge(edge)) {
@@ -865,14 +949,14 @@
            // We need DFS to avoid some duplication of VirtualStates and
           // VirtualObjects, and we want to delay phis to improve performance.
           if (use->opcode() == IrOpcode::kEffectPhi) {
-            if (!status_analysis_.IsInQueue(use->id())) {
+            if (!status_analysis_->IsInQueue(use->id())) {
               queue.push_front(use);
             }
           } else if ((use->opcode() != IrOpcode::kLoadField &&
                       use->opcode() != IrOpcode::kLoadElement) ||
-                     !IsDanglingEffectNode(use)) {
-            if (!status_analysis_.IsInQueue(use->id())) {
-              status_analysis_.SetInQueue(use->id(), true);
+                     !status_analysis_->IsDanglingEffectNode(use)) {
+            if (!status_analysis_->IsInQueue(use->id())) {
+              status_analysis_->SetInQueue(use->id(), true);
               queue.push_back(use);
             }
           } else {
@@ -1008,8 +1092,8 @@
           if (!obj->AllFieldsClear()) {
             obj = CopyForModificationAt(obj, state, node);
             obj->ClearAllFields();
-            TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
-                  obj->id());
+            TRACE("Cleared all fields of @%d:#%d\n",
+                  status_analysis_->GetAlias(obj->id()), obj->id());
           }
         }
         break;
@@ -1035,7 +1119,7 @@
                                                      Node* node) {
   if (obj->NeedCopyForModification()) {
     state = CopyForModificationAt(state, node);
-    return state->Copy(obj, GetAlias(obj->id()));
+    return state->Copy(obj, status_analysis_->GetAlias(obj->id()));
   }
   return obj;
 }
@@ -1045,7 +1129,8 @@
 #ifdef DEBUG
   if (node->opcode() != IrOpcode::kLoadField &&
       node->opcode() != IrOpcode::kLoadElement &&
-      node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
+      node->opcode() != IrOpcode::kLoad &&
+      status_analysis_->IsDanglingEffectNode(node)) {
     PrintF("Dangeling effect node: #%d (%s)\n", node->id(),
            node->op()->mnemonic());
     UNREACHABLE();
@@ -1062,7 +1147,7 @@
           static_cast<void*>(virtual_states_[effect->id()]),
           effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
           node->id());
-    if (IsEffectBranchPoint(effect) ||
+    if (status_analysis_->IsEffectBranchPoint(effect) ||
         OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
       virtual_states_[node->id()]->SetCopyRequired();
       TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
@@ -1075,7 +1160,7 @@
 void EscapeAnalysis::ProcessStart(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStart);
   virtual_states_[node->id()] =
-      new (zone()) VirtualState(node, zone(), AliasCount());
+      new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
 }
 
 bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
@@ -1084,7 +1169,8 @@
 
   VirtualState* mergeState = virtual_states_[node->id()];
   if (!mergeState) {
-    mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+    mergeState =
+        new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
     virtual_states_[node->id()] = mergeState;
     changed = true;
     TRACE("Effect Phi #%d got new virtual state %p.\n", node->id(),
@@ -1102,7 +1188,8 @@
     if (state) {
       cache_->states().push_back(state);
       if (state == mergeState) {
-        mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+        mergeState = new (zone())
+            VirtualState(node, zone(), status_analysis_->AliasCount());
         virtual_states_[node->id()] = mergeState;
         changed = true;
       }
@@ -1122,7 +1209,7 @@
   TRACE("Merge %s the node.\n", changed ? "changed" : "did not change");
 
   if (changed) {
-    status_analysis_.ResizeStatusVector();
+    status_analysis_->ResizeStatusVector();
   }
   return changed;
 }
@@ -1131,7 +1218,7 @@
   DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
   ForwardVirtualState(node);
   VirtualState* state = virtual_states_[node->id()];
-  Alias alias = GetAlias(node->id());
+  Alias alias = status_analysis_->GetAlias(node->id());
 
   // Check if we have already processed this node.
   if (state->VirtualObjectFromAlias(alias)) {
@@ -1163,19 +1250,16 @@
   Node* allocation = NodeProperties::GetValueInput(node, 0);
   if (allocation->opcode() == IrOpcode::kAllocate) {
     VirtualState* state = virtual_states_[node->id()];
-    VirtualObject* obj = state->VirtualObjectFromAlias(GetAlias(node->id()));
+    VirtualObject* obj =
+        state->VirtualObjectFromAlias(status_analysis_->GetAlias(node->id()));
     DCHECK_NOT_NULL(obj);
     obj->SetInitialized();
   }
 }
 
-Node* EscapeAnalysis::replacement(NodeId id) {
-  if (id >= replacements_.size()) return nullptr;
-  return replacements_[id];
-}
-
 Node* EscapeAnalysis::replacement(Node* node) {
-  return replacement(node->id());
+  if (node->id() >= replacements_.size()) return nullptr;
+  return replacements_[node->id()];
 }
 
 bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
@@ -1206,41 +1290,25 @@
 }
 
 Node* EscapeAnalysis::GetReplacement(Node* node) {
-  return GetReplacement(node->id());
-}
-
-Node* EscapeAnalysis::GetReplacement(NodeId id) {
-  Node* node = nullptr;
-  while (replacement(id)) {
-    node = replacement(id);
-    id = node->id();
+  Node* result = nullptr;
+  while (replacement(node)) {
+    node = result = replacement(node);
   }
-  return node;
+  return result;
 }
 
 bool EscapeAnalysis::IsVirtual(Node* node) {
-  if (node->id() >= status_analysis_.GetStatusVectorSize()) {
+  if (node->id() >= status_analysis_->GetStatusVectorSize()) {
     return false;
   }
-  return status_analysis_.IsVirtual(node);
+  return status_analysis_->IsVirtual(node);
 }
 
 bool EscapeAnalysis::IsEscaped(Node* node) {
-  if (node->id() >= status_analysis_.GetStatusVectorSize()) {
+  if (node->id() >= status_analysis_->GetStatusVectorSize()) {
     return false;
   }
-  return status_analysis_.IsEscaped(node);
-}
-
-bool EscapeAnalysis::SetEscaped(Node* node) {
-  return status_analysis_.SetEscaped(node);
-}
-
-VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
-  if (VirtualState* states = virtual_states_[at->id()]) {
-    return states->VirtualObjectFromAlias(GetAlias(id));
-  }
-  return nullptr;
+  return status_analysis_->IsEscaped(node);
 }
 
 bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
@@ -1269,7 +1337,7 @@
   }
 
   cache_->LoadVirtualObjectsForFieldsFrom(state,
-                                          status_analysis_.GetAliasMap());
+                                          status_analysis_->GetAliasMap());
   if (cache_->objects().size() == cache_->fields().size()) {
     cache_->GetFields(offset);
     if (cache_->fields().size() == cache_->objects().size()) {
@@ -1280,7 +1348,7 @@
         Node* phi = graph()->NewNode(
             common()->Phi(MachineRepresentation::kTagged, value_input_count),
             value_input_count + 1, &cache_->fields().front());
-        status_analysis_.ResizeStatusVector();
+        status_analysis_->ResizeStatusVector();
         SetReplacement(load, phi);
         TRACE(" got phi created.\n");
       } else {
@@ -1360,7 +1428,7 @@
     }
   } else {
     // We have a load from a non-const index, cannot eliminate object.
-    if (SetEscaped(from)) {
+    if (status_analysis_->SetEscaped(from)) {
       TRACE(
           "Setting #%d (%s) to escaped because load element #%d from non-const "
           "index #%d (%s)\n",
@@ -1415,7 +1483,7 @@
     }
   } else {
     // We have a store to a non-const index, cannot eliminate object.
-    if (SetEscaped(to)) {
+    if (status_analysis_->SetEscaped(to)) {
       TRACE(
           "Setting #%d (%s) to escaped because store element #%d to non-const "
           "index #%d (%s)\n",
@@ -1426,8 +1494,8 @@
       if (!obj->AllFieldsClear()) {
         obj = CopyForModificationAt(obj, state, node);
         obj->ClearAllFields();
-        TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
-              obj->id());
+        TRACE("Cleared all fields of @%d:#%d\n",
+              status_analysis_->GetAlias(obj->id()), obj->id());
       }
     }
   }
@@ -1475,21 +1543,17 @@
   return nullptr;
 }
 
-void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
-  PrintF("  Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
-         object->field_count());
-  for (size_t i = 0; i < object->field_count(); ++i) {
-    if (Node* f = object->GetField(i)) {
-      PrintF("    Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
-    }
-  }
-}
-
 void EscapeAnalysis::DebugPrintState(VirtualState* state) {
   PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
-  for (Alias alias = 0; alias < AliasCount(); ++alias) {
+  for (Alias alias = 0; alias < status_analysis_->AliasCount(); ++alias) {
     if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
-      DebugPrintObject(object, alias);
+      PrintF("  Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
+             object->field_count());
+      for (size_t i = 0; i < object->field_count(); ++i) {
+        if (Node* f = object->GetField(i)) {
+          PrintF("    Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
+        }
+      }
     }
   }
 }
@@ -1511,17 +1575,17 @@
 
 VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
                                                 Node* node) {
-  if (node->id() >= status_analysis_.GetAliasMap().size()) return nullptr;
-  Alias alias = GetAlias(node->id());
+  if (node->id() >= status_analysis_->GetAliasMap().size()) return nullptr;
+  Alias alias = status_analysis_->GetAlias(node->id());
   if (alias >= state->size()) return nullptr;
   return state->VirtualObjectFromAlias(alias);
 }
 
 bool EscapeAnalysis::ExistsVirtualAllocate() {
-  for (size_t id = 0; id < status_analysis_.GetAliasMap().size(); ++id) {
-    Alias alias = GetAlias(static_cast<NodeId>(id));
+  for (size_t id = 0; id < status_analysis_->GetAliasMap().size(); ++id) {
+    Alias alias = status_analysis_->GetAlias(static_cast<NodeId>(id));
     if (alias < EscapeStatusAnalysis::kUntrackable) {
-      if (status_analysis_.IsVirtual(static_cast<int>(id))) {
+      if (status_analysis_->IsVirtual(static_cast<int>(id))) {
         return true;
       }
     }
@@ -1529,6 +1593,8 @@
   return false;
 }
 
+Graph* EscapeAnalysis::graph() const { return status_analysis_->graph(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
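
GetReplacement now follows the replacement chain directly on nodes until no further replacement is recorded. A minimal sketch of that loop over a plain map, with illustrative types and -1 standing in for the null result:

#include <cassert>
#include <unordered_map>

using NodeIdT = int;
constexpr NodeIdT kNoReplacement = -1;  // stands in for nullptr

NodeIdT GetReplacement(
    const std::unordered_map<NodeIdT, NodeIdT>& replacements, NodeIdT id) {
  NodeIdT result = kNoReplacement;
  // Follow the chain: each replacement may itself have been replaced.
  for (auto it = replacements.find(id); it != replacements.end();
       it = replacements.find(result)) {
    result = it->second;
  }
  return result;
}

int main() {
  std::unordered_map<NodeIdT, NodeIdT> replacements{{1, 2}, {2, 5}};
  assert(GetReplacement(replacements, 1) == 5);   // 1 -> 2 -> 5
  assert(GetReplacement(replacements, 7) == kNoReplacement);
  return 0;
}
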
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index c3f236d..139abd7 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -5,7 +5,6 @@
 #ifndef V8_COMPILER_ESCAPE_ANALYSIS_H_
 #define V8_COMPILER_ESCAPE_ANALYSIS_H_
 
-#include "src/base/flags.h"
 #include "src/compiler/graph.h"
 
 namespace v8 {
@@ -14,107 +13,15 @@
 
 // Forward declarations.
 class CommonOperatorBuilder;
-class EscapeAnalysis;
+class EscapeStatusAnalysis;
+class MergeCache;
 class VirtualState;
 class VirtualObject;
 
-// EscapeStatusAnalysis determines for each allocation whether it escapes.
-class EscapeStatusAnalysis {
- public:
-  typedef NodeId Alias;
-  ~EscapeStatusAnalysis();
-
-  enum Status {
-    kUnknown = 0u,
-    kTracked = 1u << 0,
-    kEscaped = 1u << 1,
-    kOnStack = 1u << 2,
-    kVisited = 1u << 3,
-    // A node is dangling, if it is a load of some kind, and does not have
-    // an effect successor.
-    kDanglingComputed = 1u << 4,
-    kDangling = 1u << 5,
-    // A node is is an effect branch point, if it has more than 2 non-dangling
-    // effect successors.
-    kBranchPointComputed = 1u << 6,
-    kBranchPoint = 1u << 7,
-    kInQueue = 1u << 8
-  };
-  typedef base::Flags<Status, uint16_t> StatusFlags;
-
-  void RunStatusAnalysis();
-
-  bool IsVirtual(Node* node);
-  bool IsEscaped(Node* node);
-  bool IsAllocation(Node* node);
-
-  bool IsInQueue(NodeId id);
-  void SetInQueue(NodeId id, bool on_stack);
-
-  void DebugPrint();
-
-  EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
-                       Zone* zone);
-  void EnqueueForStatusAnalysis(Node* node);
-  bool SetEscaped(Node* node);
-  bool IsEffectBranchPoint(Node* node);
-  bool IsDanglingEffectNode(Node* node);
-  void ResizeStatusVector();
-  size_t GetStatusVectorSize();
-  bool IsVirtual(NodeId id);
-
-  Graph* graph() const { return graph_; }
-  Zone* zone() const { return zone_; }
-  void AssignAliases();
-  Alias GetAlias(NodeId id) const { return aliases_[id]; }
-  const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
-  Alias AliasCount() const { return next_free_alias_; }
-  static const Alias kNotReachable;
-  static const Alias kUntrackable;
-
-  bool IsNotReachable(Node* node);
-
- private:
-  void Process(Node* node);
-  void ProcessAllocate(Node* node);
-  void ProcessFinishRegion(Node* node);
-  void ProcessStoreField(Node* node);
-  void ProcessStoreElement(Node* node);
-  bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
-    return CheckUsesForEscape(node, node, phi_escaping);
-  }
-  bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
-  void RevisitUses(Node* node);
-  void RevisitInputs(Node* node);
-
-  Alias NextAlias() { return next_free_alias_++; }
-
-  bool HasEntry(Node* node);
-
-  bool IsAllocationPhi(Node* node);
-
-  ZoneVector<Node*> stack_;
-  EscapeAnalysis* object_analysis_;
-  Graph* const graph_;
-  Zone* const zone_;
-  ZoneVector<StatusFlags> status_;
-  Alias next_free_alias_;
-  ZoneVector<Node*> status_stack_;
-  ZoneVector<Alias> aliases_;
-
-  DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
-
-// Forward Declaration.
-class MergeCache;
-
 // EscapeObjectAnalysis simulates stores to determine values of loads if
 // an object is virtual and eliminated.
 class EscapeAnalysis {
  public:
-  using Alias = EscapeStatusAnalysis::Alias;
   EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
   ~EscapeAnalysis();
 
@@ -148,13 +55,9 @@
   VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
   VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
                                        Node* node);
-  VirtualObject* GetVirtualObject(Node* at, NodeId id);
 
-  bool SetEscaped(Node* node);
-  Node* replacement(NodeId id);
   Node* replacement(Node* node);
   Node* ResolveReplacement(Node* node);
-  Node* GetReplacement(NodeId id);
   bool SetReplacement(Node* node, Node* rep);
   bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
 
@@ -162,25 +65,14 @@
 
   void DebugPrint();
   void DebugPrintState(VirtualState* state);
-  void DebugPrintObject(VirtualObject* state, Alias id);
 
-  Graph* graph() const { return status_analysis_.graph(); }
-  Zone* zone() const { return status_analysis_.zone(); }
+  Graph* graph() const;
+  Zone* zone() const { return zone_; }
   CommonOperatorBuilder* common() const { return common_; }
-  bool IsEffectBranchPoint(Node* node) {
-    return status_analysis_.IsEffectBranchPoint(node);
-  }
-  bool IsDanglingEffectNode(Node* node) {
-    return status_analysis_.IsDanglingEffectNode(node);
-  }
-  bool IsNotReachable(Node* node) {
-    return status_analysis_.IsNotReachable(node);
-  }
-  Alias GetAlias(NodeId id) const { return status_analysis_.GetAlias(id); }
-  Alias AliasCount() const { return status_analysis_.AliasCount(); }
 
-  EscapeStatusAnalysis status_analysis_;
+  Zone* const zone_;
   CommonOperatorBuilder* const common_;
+  EscapeStatusAnalysis* status_analysis_;
   ZoneVector<VirtualState*> virtual_states_;
   ZoneVector<Node*> replacements_;
   MergeCache* cache_;
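
The net effect of this header change is that EscapeStatusAnalysis is fully defined in escape-analysis.cc, and the header keeps only a forward declaration plus a pointer member, so includers stop recompiling when the helper's internals change. A minimal sketch of the pattern, with illustrative names and a raw pointer mirroring the zone-allocated member (V8's zone owns the object, so it is never deleted individually):

// --- header: forward declaration only ---
class StatusAnalysisImpl;

class Analysis {
 public:
  Analysis();
  bool IsVirtual(int node_id) const;

 private:
  StatusAnalysisImpl* status_analysis_;  // owned elsewhere (the zone, in V8)
};

// --- .cc: the full definition lives here ---
class StatusAnalysisImpl {
 public:
  bool IsVirtual(int node_id) const { return node_id % 2 == 0; }  // stub
};

Analysis::Analysis() : status_analysis_(new StatusAnalysisImpl) {}
bool Analysis::IsVirtual(int node_id) const {
  return status_analysis_->IsVirtual(node_id);
}

int main() { return Analysis().IsVirtual(4) ? 0 : 1; }
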
diff --git a/src/compiler/frame.cc b/src/compiler/frame.cc
index 3d93e15..e0284c8 100644
--- a/src/compiler/frame.cc
+++ b/src/compiler/frame.cc
@@ -12,15 +12,13 @@
 namespace internal {
 namespace compiler {
 
-Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
+Frame::Frame(int fixed_frame_size_in_slots)
     : frame_slot_count_(fixed_frame_size_in_slots),
-      callee_saved_slot_count_(0),
       spill_slot_count_(0),
       allocated_registers_(nullptr),
       allocated_double_registers_(nullptr) {}
 
 int Frame::AlignFrame(int alignment) {
-  DCHECK_EQ(0, callee_saved_slot_count_);
   int alignment_slots = alignment / kPointerSize;
   int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
   if (delta != alignment_slots) {
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index d413d3e..de2ae1a 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -78,14 +78,10 @@
 //
 class Frame : public ZoneObject {
  public:
-  explicit Frame(int fixed_frame_size_in_slots,
-                 const CallDescriptor* descriptor);
+  explicit Frame(int fixed_frame_size_in_slots);
 
   inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
 
-  inline int GetSavedCalleeRegisterSlotCount() const {
-    return callee_saved_slot_count_;
-  }
   inline int GetSpillSlotCount() const { return spill_slot_count_; }
 
   void SetAllocatedRegisters(BitVector* regs) {
@@ -102,23 +98,20 @@
     return !allocated_double_registers_->IsEmpty();
   }
 
-  int AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
-    DCHECK_EQ(0, callee_saved_slot_count_);
+  void AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
     int alignment_slots = alignment / kPointerSize;
     int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
     if (delta != alignment_slots) {
       frame_slot_count_ += delta;
     }
-    return delta;
+    spill_slot_count_ += delta;
   }
 
   void AllocateSavedCalleeRegisterSlots(int count) {
     frame_slot_count_ += count;
-    callee_saved_slot_count_ += count;
   }
 
   int AllocateSpillSlot(int width) {
-    DCHECK_EQ(0, callee_saved_slot_count_);
     int frame_slot_count_before = frame_slot_count_;
     int slot = AllocateAlignedFrameSlot(width);
     spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
@@ -128,7 +121,6 @@
   int AlignFrame(int alignment = kDoubleSize);
 
   int ReserveSpillSlots(size_t slot_count) {
-    DCHECK_EQ(0, callee_saved_slot_count_);
     DCHECK_EQ(0, spill_slot_count_);
     spill_slot_count_ += static_cast<int>(slot_count);
     frame_slot_count_ += static_cast<int>(slot_count);
@@ -152,7 +144,6 @@
 
  private:
   int frame_slot_count_;
-  int callee_saved_slot_count_;
   int spill_slot_count_;
   BitVector* allocated_registers_;
   BitVector* allocated_double_registers_;
@@ -191,13 +182,13 @@
 // current function's frame.
 class FrameAccessState : public ZoneObject {
  public:
-  explicit FrameAccessState(Frame* const frame)
+  explicit FrameAccessState(const Frame* const frame)
       : frame_(frame),
         access_frame_with_fp_(false),
         sp_delta_(0),
         has_frame_(false) {}
 
-  Frame* frame() const { return frame_; }
+  const Frame* frame() const { return frame_; }
   void MarkHasFrame(bool state);
 
   int sp_delta() const { return sp_delta_; }
@@ -229,7 +220,7 @@
   FrameOffset GetFrameOffset(int spill_slot) const;
 
  private:
-  Frame* const frame_;
+  const Frame* const frame_;
   bool access_frame_with_fp_;
   int sp_delta_;
   bool has_frame_;
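
Both AlignFrame and AlignSavedCalleeRegisterSlots rely on the same power-of-two padding computation: delta equals alignment_slots exactly when the slot count is already aligned. A stand-alone sketch, assuming kPointerSize == 4 (as on ia32) and kDoubleSize == 8, so alignment_slots == 2:

#include <cassert>

int PadToAlignment(int frame_slot_count, int alignment_slots) {
  // Requires alignment_slots to be a power of two: delta is the distance to
  // the next aligned boundary, and equals alignment_slots when already there.
  int delta = alignment_slots - (frame_slot_count & (alignment_slots - 1));
  if (delta != alignment_slots) frame_slot_count += delta;
  return frame_slot_count;
}

int main() {
  assert(PadToAlignment(7, 2) == 8);  // one padding slot inserted
  assert(PadToAlignment(8, 2) == 8);  // already aligned, unchanged
  return 0;
}
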
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 35e91fa..9403d35 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -94,7 +94,7 @@
 
   DCHECK((*blocker)->IsPending());
   // Ensure source is a register or both are stack slots, to limit swap cases.
-  if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+  if (source.IsStackSlot() || source.IsFPStackSlot()) {
     std::swap(source, destination);
   }
   assembler_->AssembleSwap(&source, &destination);
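
The swap above canonicalizes operand roles so that AssembleSwap only ever sees a stack slot (general or FP) in the source position when both operands live in memory. A toy model, with an illustrative Loc enum in place of InstructionOperand:

#include <cassert>
#include <utility>

enum class Loc { kRegister, kStackSlot, kFPStackSlot };

void CanonicalizeForSwap(Loc* source, Loc* destination) {
  // If the source is any kind of stack slot, trade roles; swapping is
  // symmetric, so this only reduces the number of operand shapes to handle.
  if (*source == Loc::kStackSlot || *source == Loc::kFPStackSlot) {
    std::swap(*source, *destination);
  }
}

int main() {
  Loc src = Loc::kStackSlot, dst = Loc::kRegister;
  CanonicalizeForSwap(&src, &dst);
  assert(src == Loc::kRegister && dst == Loc::kStackSlot);
  return 0;
}
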
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
index 6f583d6..2ef1ba1 100644
--- a/src/compiler/graph-reducer.cc
+++ b/src/compiler/graph-reducer.cc
@@ -222,7 +222,11 @@
         edge.UpdateTo(dead_);
         Revisit(user);
       } else {
-        UNREACHABLE();
+        DCHECK_NOT_NULL(control);
+        edge.UpdateTo(control);
+        Revisit(user);
+        // TODO(jarin) Check that the node cannot throw (otherwise, it
+        // would have to be connected via IfSuccess/IfException).
       }
     } else if (NodeProperties::IsEffectEdge(edge)) {
       DCHECK_NOT_NULL(effect);
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 301e390..1dc38df 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -25,9 +25,8 @@
 namespace internal {
 namespace compiler {
 
-
-FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
-                            const char* suffix, const char* mode) {
+base::SmartArrayPointer<const char> GetVisualizerLogFileName(
+    CompilationInfo* info, const char* phase, const char* suffix) {
   EmbeddedVector<char, 256> filename(0);
   base::SmartArrayPointer<char> debug_name = info->GetDebugName();
   if (strlen(debug_name.get()) > 0) {
@@ -46,7 +45,11 @@
   } else {
     SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
   }
-  return base::OS::FOpen(full_filename.start(), mode);
+
+  char* buffer = new char[full_filename.length() + 1];
+  memcpy(buffer, full_filename.start(), full_filename.length());
+  buffer[full_filename.length()] = '\0';
+  return base::SmartArrayPointer<const char>(buffer);
 }
 
 
@@ -536,7 +539,7 @@
     os_ << vreg << ":" << range->relative_id() << " " << type;
     if (range->HasRegisterAssigned()) {
       AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
-      if (op.IsDoubleRegister()) {
+      if (op.IsFPRegister()) {
         DoubleRegister assigned_reg = op.GetDoubleRegister();
         os_ << " \"" << assigned_reg.ToString() << "\"";
       } else {
@@ -555,7 +558,7 @@
             << "\"";
       } else {
         index = AllocatedOperand::cast(top->GetSpillOperand())->index();
-        if (top->kind() == DOUBLE_REGISTERS) {
+        if (top->kind() == FP_REGISTERS) {
           os_ << " \"double_stack:" << index << "\"";
         } else if (top->kind() == GENERAL_REGISTERS) {
           os_ << " \"stack:" << index << "\"";
@@ -640,7 +643,13 @@
         if (j++ > 0) os << ", ";
         os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
       }
-      os << ")" << std::endl;
+      os << ")";
+      if (NodeProperties::IsTyped(n)) {
+        os << "  [Type: ";
+        NodeProperties::GetType(n)->PrintTo(os);
+        os << "]";
+      }
+      os << std::endl;
     }
   }
   return os;
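
GetVisualizerLogFileName now returns an owned, NUL-terminated copy of the computed file name instead of an open FILE*. A sketch of the ownership pattern, using std::unique_ptr<char[]> as a stand-in for base::SmartArrayPointer and a hypothetical file name:

#include <cstdio>
#include <cstring>
#include <memory>
#include <string>

std::unique_ptr<char[]> MakeOwnedCString(const std::string& s) {
  // Copy the length-delimited buffer and append the terminating NUL.
  std::unique_ptr<char[]> buffer(new char[s.size() + 1]);
  std::memcpy(buffer.get(), s.data(), s.size());
  buffer[s.size()] = '\0';
  return buffer;
}

int main() {
  auto name = MakeOwnedCString("turbo-foo-phase.json");  // hypothetical
  std::printf("%s\n", name.get());
  return 0;
}
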
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
index 1a971a5..85b0cf7 100644
--- a/src/compiler/graph-visualizer.h
+++ b/src/compiler/graph-visualizer.h
@@ -8,6 +8,8 @@
 #include <stdio.h>
 #include <iosfwd>
 
+#include "src/base/smart-pointers.h"
+
 namespace v8 {
 namespace internal {
 
@@ -21,8 +23,8 @@
 class Schedule;
 class SourcePositionTable;
 
-FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
-                            const char* suffix, const char* mode);
+base::SmartArrayPointer<const char> GetVisualizerLogFileName(
+    CompilationInfo* info, const char* phase, const char* suffix);
 
 struct AsJSON {
   AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index ee05ad0..a9083e1 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -44,11 +44,11 @@
     if (op->IsRegister()) {
       DCHECK(extra == 0);
       return Operand(ToRegister(op));
-    } else if (op->IsDoubleRegister()) {
+    } else if (op->IsFPRegister()) {
       DCHECK(extra == 0);
       return Operand(ToDoubleRegister(op));
     }
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
   }
 
@@ -59,12 +59,18 @@
   }
 
   Operand HighOperand(InstructionOperand* op) {
-    DCHECK(op->IsDoubleStackSlot());
+    DCHECK(op->IsFPStackSlot());
     return ToOperand(op, kPointerSize);
   }
 
   Immediate ToImmediate(InstructionOperand* operand) {
     Constant constant = ToConstant(operand);
+    if (constant.type() == Constant::kInt32 &&
+        (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+         constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+      return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
+                       constant.rmode());
+    }
     switch (constant.type()) {
       case Constant::kInt32:
         return Immediate(constant.ToInt32());
@@ -362,8 +368,6 @@
   __ pop(ebp);
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {}
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -424,7 +428,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   IA32OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -462,6 +467,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!HasImmediateInput(instr, 0));
+      Register reg = i.InputRegister(0);
+      __ jmp(reg);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -534,7 +548,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -965,14 +981,14 @@
       __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
     case kSSEFloat64ExtractLowWord32:
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ mov(i.OutputRegister(), i.InputOperand(0));
       } else {
         __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
       }
       break;
     case kSSEFloat64ExtractHighWord32:
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
       } else {
         __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
@@ -1161,7 +1177,7 @@
       }
       break;
     case kIA32BitcastFI:
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ mov(i.OutputRegister(), i.InputOperand(0));
       } else {
         __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -1210,7 +1226,7 @@
       break;
     }
     case kIA32PushFloat32:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ movss(Operand(esp, 0), i.InputDoubleRegister(0));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1227,7 +1243,7 @@
       }
       break;
     case kIA32PushFloat64:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1244,7 +1260,7 @@
       }
       break;
     case kIA32Push:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1265,6 +1281,24 @@
       }
       break;
     }
+    case kIA32Xchgb: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchg_b(i.InputRegister(index), operand);
+      break;
+    }
+    case kIA32Xchgw: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchg_w(i.InputRegister(index), operand);
+      break;
+    }
+    case kIA32Xchgl: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchg(i.InputRegister(index), operand);
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
       break;
@@ -1311,7 +1345,18 @@
     case kCheckedStoreWord64:
       UNREACHABLE();  // currently unsupported checked int64 load/store.
       break;
+    case kAtomicLoadInt8:
+    case kAtomicLoadUint8:
+    case kAtomicLoadInt16:
+    case kAtomicLoadUint16:
+    case kAtomicLoadWord32:
+    case kAtomicStoreWord8:
+    case kAtomicStoreWord16:
+    case kAtomicStoreWord32:
+      UNREACHABLE();  // Won't be generated by instruction selector.
+      break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
 
 
@@ -1485,12 +1530,13 @@
   __ jmp(Operand::JumpTable(input, times_4, table));
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
 
 
@@ -1621,8 +1667,21 @@
 //                                            | RET | args |  caller frame |
 //                                            ^ esp                        ^ ebp
 
+void CodeGenerator::FinishFrame(Frame* frame) {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {  // Save callee-saved registers.
+    DCHECK(!info()->is_osr());
+    int pushed = 0;
+    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+      if (!((1 << i) & saves)) continue;
+      ++pushed;
+    }
+    frame->AllocateSavedCalleeRegisterSlots(pushed);
+  }
+}
 
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsCFunctionCall()) {
@@ -1634,7 +1693,9 @@
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
   }
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+  int shrink_slots = frame()->GetSpillSlotCount();
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1645,12 +1706,12 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
   const RegList saves = descriptor->CalleeSavedRegisters();
-  if (stack_shrink_slots > 0) {
-    __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+  if (shrink_slots > 0) {
+    __ sub(esp, Immediate(shrink_slots * kPointerSize));
   }
 
   if (saves != 0) {  // Save callee-saved registers.
@@ -1661,7 +1722,6 @@
       __ push(Register::from_code(i));
       ++pushed;
     }
-    frame()->AllocateSavedCalleeRegisterSlots(pushed);
   }
 }
 
@@ -1756,11 +1816,11 @@
     } else if (src_constant.type() == Constant::kFloat32) {
       // TODO(turbofan): Can we do better here?
       uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         XMMRegister dst = g.ToDoubleRegister(destination);
         __ Move(dst, src);
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         Operand dst = g.ToOperand(destination);
         __ Move(dst, Immediate(src));
       }
@@ -1769,31 +1829,31 @@
       uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
       uint32_t lower = static_cast<uint32_t>(src);
       uint32_t upper = static_cast<uint32_t>(src >> 32);
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         XMMRegister dst = g.ToDoubleRegister(destination);
         __ Move(dst, src);
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         Operand dst0 = g.ToOperand(destination);
         Operand dst1 = g.HighOperand(destination);
         __ Move(dst0, Immediate(lower));
         __ Move(dst1, Immediate(upper));
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     XMMRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       XMMRegister dst = g.ToDoubleRegister(destination);
       __ movaps(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       Operand dst = g.ToOperand(destination);
       __ movsd(dst, src);
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     Operand src = g.ToOperand(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       XMMRegister dst = g.ToDoubleRegister(destination);
       __ movsd(dst, src);
     } else {
@@ -1841,21 +1901,21 @@
     frame_access_state()->IncreaseSPDelta(-1);
     Operand src2 = g.ToOperand(source);
     __ pop(src2);
-  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+  } else if (source->IsFPRegister() && destination->IsFPRegister()) {
     // XMM register-register swap.
     XMMRegister src = g.ToDoubleRegister(source);
     XMMRegister dst = g.ToDoubleRegister(destination);
     __ movaps(kScratchDoubleReg, src);
     __ movaps(src, dst);
     __ movaps(dst, kScratchDoubleReg);
-  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+  } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
     // XMM register-memory swap.
     XMMRegister reg = g.ToDoubleRegister(source);
     Operand other = g.ToOperand(destination);
     __ movsd(kScratchDoubleReg, other);
     __ movsd(other, reg);
     __ movaps(reg, kScratchDoubleReg);
-  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+  } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
     // Double-width memory-to-memory.
     Operand src0 = g.ToOperand(source);
     Operand src1 = g.HighOperand(source);
@@ -1881,9 +1941,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
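
The new FinishFrame counts the callee-saved registers named in the RegList bitmask so their frame slots are reserved before code generation, while AssembleConstructFrame pushes them later. A stand-alone sketch of the counting loop, where RegList is a bitmask of register codes and the mask in main is hypothetical:

#include <cassert>
#include <cstdint>

using RegList = uint32_t;  // bitmask of register codes

int CountSavedRegisters(RegList saves, int num_registers) {
  int pushed = 0;
  // Mirror the codegen loop: walk codes from high to low, as the pushes do.
  for (int i = num_registers - 1; i >= 0; i--) {
    if (!((1u << i) & saves)) continue;
    ++pushed;
  }
  return pushed;
}

int main() {
  // Hypothetical mask naming registers 3, 5 and 6.
  assert(CountSavedRegisters((1u << 3) | (1u << 5) | (1u << 6), 8) == 3);
  return 0;
}
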
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 3cf2094..79dd05e 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -113,7 +113,10 @@
   V(IA32PushFloat32)               \
   V(IA32PushFloat64)               \
   V(IA32Poke)                      \
-  V(IA32StackCheck)
+  V(IA32StackCheck)                \
+  V(IA32Xchgb)                     \
+  V(IA32Xchgw)                     \
+  V(IA32Xchgl)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index 803fdf6..f341db4 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -127,6 +127,11 @@
     case kIA32Poke:
       return kHasSideEffect;
 
+    case kIA32Xchgb:
+    case kIA32Xchgw:
+    case kIA32Xchgl:
+      return kIsLoadOperation | kHasSideEffect;
+
 #define CASE(Name) case k##Name:
     COMMON_ARCH_OPCODE_LIST(CASE)
 #undef CASE
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 5c4acce..9002d75 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -27,11 +27,15 @@
     return DefineAsRegister(node);
   }
 
-  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+                          int effect_level) {
     if (input->opcode() != IrOpcode::kLoad ||
         !selector()->CanCover(node, input)) {
       return false;
     }
+    if (effect_level != selector()->GetEffectLevel(input)) {
+      return false;
+    }
     MachineRepresentation rep =
         LoadRepresentationOf(input->op()).representation();
     switch (opcode) {
@@ -56,13 +60,20 @@
       case IrOpcode::kInt32Constant:
       case IrOpcode::kNumberConstant:
       case IrOpcode::kExternalConstant:
+      case IrOpcode::kRelocatableInt32Constant:
+      case IrOpcode::kRelocatableInt64Constant:
         return true;
       case IrOpcode::kHeapConstant: {
+// TODO(bmeurer): We must not dereference handles concurrently. If we
+// really have to do this here, then we need to find a way to put this
+// information on the HeapConstant node already.
+#if 0
         // Constants in new space cannot be used as immediates in V8 because
         // the GC does not scan code objects when collecting the new generation.
         Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
         Isolate* isolate = value->GetIsolate();
         return !isolate->heap()->InNewSpace(*value);
+#endif
       }
       default:
         return false;
@@ -870,15 +881,12 @@
   VisitRO(this, node, kSSEFloat64ToFloat32);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, node, kArchTruncateDoubleToI);
+}
 
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, node, kArchTruncateDoubleToI);
-    case TruncationMode::kRoundToZero:
-      return VisitRO(this, node, kSSEFloat64ToInt32);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRO(this, node, kSSEFloat64ToInt32);
 }
 
 
@@ -915,6 +923,9 @@
   VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
+}
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   IA32OperandGenerator g(this);
@@ -939,6 +950,9 @@
   VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
+}
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
@@ -1225,18 +1239,24 @@
 
   InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
 
+  int effect_level = selector->GetEffectLevel(node);
+  if (cont->IsBranch()) {
+    effect_level = selector->GetEffectLevel(
+        cont->true_block()->PredecessorAt(0)->control_input());
+  }
+
   // If one of the two inputs is an immediate, make sure it's on the right, or
   // if one of the two inputs is a memory operand, make sure it's on the left.
   if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
-      (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
-       !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
+      (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
+       !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (g.CanBeMemoryOperand(opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
       // TODO(epertoso): we should use `narrowed_opcode' here once we match
       // immediates too.
       return VisitCompareWithMemoryOperand(selector, opcode, left,
@@ -1247,7 +1267,7 @@
   }
 
   // Match memory operands on left side of comparison.
-  if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+  if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
     bool needs_byte_register =
         narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
     return VisitCompareWithMemoryOperand(
@@ -1563,6 +1583,52 @@
        g.UseRegister(left), g.Use(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+         load_rep.representation() == MachineRepresentation::kWord16 ||
+         load_rep.representation() == MachineRepresentation::kWord32);
+  USE(load_rep);
+  VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kIA32Xchgb;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kIA32Xchgw;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kIA32Xchgl;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  AddressingMode addressing_mode;
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  if (g.CanBeImmediate(index)) {
+    inputs[input_count++] = g.UseImmediate(index);
+    addressing_mode = kMode_MRI;
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(index);
+    addressing_mode = kMode_MR1;
+  }
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 0, nullptr, input_count, inputs);
+}
 
 // static
 MachineOperatorBuilder::Flags
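
VisitAtomicStore lowers atomic stores to the new kIA32Xchg{b,w,l} opcodes because on ia32 a plain mov store is not a full barrier, whereas xchg with a memory operand carries an implicit lock prefix. Standard C++ exhibits the same lowering; the sketch below illustrates the semantics and is not V8 code:

#include <atomic>
#include <cstdint>

void SeqCstStore(std::atomic<int32_t>* slot, int32_t value) {
  // On x86 compilers emit this as xchg (or mov followed by mfence).
  slot->store(value, std::memory_order_seq_cst);
}

int main() {
  std::atomic<int32_t> cell{0};
  SeqCstStore(&cell, 42);
  return cell.load() == 42 ? 0 : 1;
}
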
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index b005083..57868c6 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -48,6 +48,7 @@
   V(ArchCallJSFunction)                   \
   V(ArchTailCallJSFunctionFromJSFunction) \
   V(ArchTailCallJSFunction)               \
+  V(ArchTailCallAddress)                  \
   V(ArchPrepareCallCFunction)             \
   V(ArchCallCFunction)                    \
   V(ArchPrepareTailCall)                  \
@@ -77,7 +78,15 @@
   V(CheckedStoreWord64)                   \
   V(CheckedStoreFloat32)                  \
   V(CheckedStoreFloat64)                  \
-  V(ArchStackSlot)
+  V(ArchStackSlot)                        \
+  V(AtomicLoadInt8)                       \
+  V(AtomicLoadUint8)                      \
+  V(AtomicLoadInt16)                      \
+  V(AtomicLoadUint16)                     \
+  V(AtomicLoadWord32)                     \
+  V(AtomicStoreWord8)                     \
+  V(AtomicStoreWord16)                    \
+  V(AtomicStoreWord32)
 
 #define ARCH_OPCODE_LIST(V)  \
   COMMON_ARCH_OPCODE_LIST(V) \
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index b612cd1..b3e4bbc 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -82,7 +82,8 @@
       graph_(zone),
       last_side_effect_instr_(nullptr),
       pending_loads_(zone),
-      last_live_in_reg_marker_(nullptr) {
+      last_live_in_reg_marker_(nullptr),
+      last_deopt_(nullptr) {
 }
 
 
@@ -91,6 +92,7 @@
   DCHECK(last_side_effect_instr_ == nullptr);
   DCHECK(pending_loads_.empty());
   DCHECK(last_live_in_reg_marker_ == nullptr);
+  DCHECK(last_deopt_ == nullptr);
   sequence()->StartBlock(rpo);
 }
 
@@ -106,6 +108,7 @@
   last_side_effect_instr_ = nullptr;
   pending_loads_.clear();
   last_live_in_reg_marker_ = nullptr;
+  last_deopt_ = nullptr;
 }
 
 
@@ -128,6 +131,12 @@
       last_live_in_reg_marker_->AddSuccessor(new_node);
     }
 
+    // Make sure that new instructions are not scheduled before the last
+    // deoptimization point.
+    if (last_deopt_ != nullptr) {
+      last_deopt_->AddSuccessor(new_node);
+    }
+
     // Instructions with side effects and memory operations can't be
     // reordered with respect to each other.
     if (HasSideEffect(instr)) {
@@ -146,6 +155,13 @@
         last_side_effect_instr_->AddSuccessor(new_node);
       }
       pending_loads_.push_back(new_node);
+    } else if (instr->IsDeoptimizeCall()) {
+      // Ensure that deopts are not reordered with respect to side-effect
+      // instructions.
+      if (last_side_effect_instr_ != nullptr) {
+        last_side_effect_instr_->AddSuccessor(new_node);
+      }
+      last_deopt_ = new_node;
     }
 
     // Look for operand dependencies.
@@ -224,6 +240,7 @@
     case kArchTailCallCodeObject:
     case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction:
+    case kArchTailCallAddress:
       return kHasSideEffect | kIsBlockTerminator;
 
     case kArchDeoptimize:
@@ -253,6 +270,18 @@
     case kArchStoreWithWriteBarrier:
       return kHasSideEffect;
 
+    case kAtomicLoadInt8:
+    case kAtomicLoadUint8:
+    case kAtomicLoadInt16:
+    case kAtomicLoadUint16:
+    case kAtomicLoadWord32:
+      return kIsLoadOperation;
+
+    case kAtomicStoreWord8:
+    case kAtomicStoreWord16:
+    case kAtomicStoreWord32:
+      return kHasSideEffect;
+
 #define CASE(Name) case k##Name:
     TARGET_ARCH_OPCODE_LIST(CASE)
 #undef CASE
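
The last_deopt_ bookkeeping above threads extra dependency edges through the scheduling graph: every later instruction becomes a successor of the most recent deoptimization point, and deopts themselves are ordered after earlier side effects. A toy model with illustrative names:

#include <vector>

struct DepNode {
  std::vector<DepNode*> successors;
  void AddSuccessor(DepNode* node) { successors.push_back(node); }
};

struct ToyScheduler {
  DepNode* last_deopt_ = nullptr;

  void Add(DepNode* new_node, bool is_deopt_call) {
    // Nothing added after a deopt may be scheduled before it.
    if (last_deopt_ != nullptr) last_deopt_->AddSuccessor(new_node);
    if (is_deopt_call) last_deopt_ = new_node;
  }
};

int main() {
  DepNode a, deopt, b;
  ToyScheduler s;
  s.Add(&a, false);
  s.Add(&deopt, true);
  s.Add(&b, false);  // b is now constrained to run after deopt
  return deopt.successors.size() == 1 ? 0 : 1;
}
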
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index 104c0b9..23950f7 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -180,7 +180,9 @@
     return (instr->arch_opcode() == kArchNop) &&
       (instr->OutputCount() == 1) &&
       (instr->OutputAt(0)->IsUnallocated()) &&
-      UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy();
+      (UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy() ||
+       UnallocatedOperand::cast(
+           instr->OutputAt(0))->HasFixedDoubleRegisterPolicy());
   }
 
   void ComputeTotalLatencies();
@@ -209,6 +211,9 @@
   // All these nops are chained together and added as a predecessor of every
   // other instruction in the basic block.
   ScheduleGraphNode* last_live_in_reg_marker_;
+
+  // Last deoptimization instruction encountered while building the graph.
+  ScheduleGraphNode* last_deopt_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index e750aed..301612c 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -211,6 +211,9 @@
         return Constant(OpParameter<int64_t>(node));
       case IrOpcode::kFloat32Constant:
         return Constant(OpParameter<float>(node));
+      case IrOpcode::kRelocatableInt32Constant:
+      case IrOpcode::kRelocatableInt64Constant:
+        return Constant(OpParameter<RelocatablePtrConstantInfo>(node));
       case IrOpcode::kFloat64Constant:
       case IrOpcode::kNumberConstant:
         return Constant(OpParameter<double>(node));
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index d172ed1..ea68c78 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -87,7 +87,6 @@
 #endif
 }
 
-
 void InstructionSelector::StartBlock(RpoNumber rpo) {
   if (FLAG_turbo_instruction_scheduling &&
       InstructionScheduler::SchedulerSupported()) {
@@ -714,6 +713,12 @@
     SetEffectLevel(node, effect_level);
   }
 
+  // We visit the control first, then the nodes in the block, so the block's
+  // control input should be on the same effect level as the last node.
+  if (block->control_input() != nullptr) {
+    SetEffectLevel(block->control_input(), effect_level);
+  }
+
   // Generate code for the block control "top down", but schedule the code
   // "bottom up".
   VisitControl(block);
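
A standalone illustration (not V8 code; the enum and level assignment are a
simplified model) of why the control input needs the last node's effect
level: instruction selection may only fold a load or compare into the branch
when no store intervenes, i.e. when both sit at the same effect level.

    #include <cassert>
    #include <vector>

    enum Kind { kLoad, kStore, kCompare };

    int main() {
      std::vector<Kind> block = {kLoad, kStore, kLoad, kCompare};
      std::vector<int> levels;
      int level = 0;
      for (Kind k : block) {
        levels.push_back(level);
        if (k == kStore) ++level;  // a store starts a new effect level
      }
      // Pinning the control input to the last node's level (1 here) keeps it
      // combinable with the final compare; the first kLoad (level 0) stays
      // separated by the intervening store.
      int control_level = level;
      assert(control_level == levels.back());
      return 0;
    }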
@@ -859,8 +864,6 @@
       return MarkAsReference(node), VisitIfException(node);
     case IrOpcode::kFinishRegion:
       return MarkAsReference(node), VisitFinishRegion(node);
-    case IrOpcode::kGuard:
-      return MarkAsReference(node), VisitGuard(node);
     case IrOpcode::kParameter: {
       MachineType type =
           linkage()->GetParameterType(ParameterIndexOf(node->op()));
@@ -879,6 +882,8 @@
     case IrOpcode::kInt32Constant:
     case IrOpcode::kInt64Constant:
     case IrOpcode::kExternalConstant:
+    case IrOpcode::kRelocatableInt32Constant:
+    case IrOpcode::kRelocatableInt64Constant:
       return VisitConstant(node);
     case IrOpcode::kFloat32Constant:
       return MarkAsFloat32(node), VisitConstant(node);
@@ -1012,6 +1017,8 @@
       return VisitUint64LessThanOrEqual(node);
     case IrOpcode::kUint64Mod:
       return MarkAsWord64(node), VisitUint64Mod(node);
+    case IrOpcode::kBitcastWordToTagged:
+      return MarkAsReference(node), VisitBitcastWordToTagged(node);
     case IrOpcode::kChangeFloat32ToFloat64:
       return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
     case IrOpcode::kChangeInt32ToFloat64:
@@ -1042,10 +1049,12 @@
       return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
     case IrOpcode::kTruncateFloat64ToFloat32:
       return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
-    case IrOpcode::kTruncateFloat64ToInt32:
-      return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
+    case IrOpcode::kTruncateFloat64ToWord32:
+      return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
     case IrOpcode::kTruncateInt64ToInt32:
       return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+    case IrOpcode::kRoundFloat64ToInt32:
+      return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
     case IrOpcode::kRoundInt64ToFloat32:
       return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
     case IrOpcode::kRoundInt32ToFloat32:
@@ -1070,6 +1079,8 @@
       return MarkAsFloat32(node), VisitFloat32Add(node);
     case IrOpcode::kFloat32Sub:
       return MarkAsFloat32(node), VisitFloat32Sub(node);
+    case IrOpcode::kFloat32SubPreserveNan:
+      return MarkAsFloat32(node), VisitFloat32SubPreserveNan(node);
     case IrOpcode::kFloat32Mul:
       return MarkAsFloat32(node), VisitFloat32Mul(node);
     case IrOpcode::kFloat32Div:
@@ -1092,6 +1103,8 @@
       return MarkAsFloat64(node), VisitFloat64Add(node);
     case IrOpcode::kFloat64Sub:
       return MarkAsFloat64(node), VisitFloat64Sub(node);
+    case IrOpcode::kFloat64SubPreserveNan:
+      return MarkAsFloat64(node), VisitFloat64SubPreserveNan(node);
     case IrOpcode::kFloat64Mul:
       return MarkAsFloat64(node), VisitFloat64Mul(node);
     case IrOpcode::kFloat64Div:
@@ -1178,6 +1191,13 @@
       MarkAsWord32(NodeProperties::FindProjection(node, 0));
       MarkAsWord32(NodeProperties::FindProjection(node, 1));
       return VisitWord32PairSar(node);
+    case IrOpcode::kAtomicLoad: {
+      LoadRepresentation type = LoadRepresentationOf(node->op());
+      MarkAsRepresentation(type.representation(), node);
+      return VisitAtomicLoad(node);
+    }
+    case IrOpcode::kAtomicStore:
+      return VisitAtomicStore(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -1246,6 +1266,12 @@
        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
 }
 
+void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
+  OperandGenerator g(this);
+  Node* value = node->InputAt(0);
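+  // A bitcast is a no-op at the machine level, so emit a nop whose output
+  // reuses the input's register; only the tagged marking applied by the
+  // caller (MarkAsReference) has an effect.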
+  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
 // 32 bit targets do not implement the following instructions.
 #if V8_TARGET_ARCH_32_BIT
 
@@ -1422,13 +1448,6 @@
 }
 
 
-void InstructionSelector::VisitGuard(Node* node) {
-  OperandGenerator g(this);
-  Node* value = node->InputAt(0);
-  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-}
-
-
 void InstructionSelector::VisitParameter(Node* node) {
   OperandGenerator g(this);
   int index = ParameterIndexOf(node->op());
@@ -1449,7 +1468,7 @@
   OperandGenerator g(this);
   Node* call = node->InputAt(1);
   DCHECK_EQ(IrOpcode::kCall, call->opcode());
-  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
+  const CallDescriptor* descriptor = CallDescriptorOf(call->op());
   Emit(kArchNop,
        g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
                           descriptor->GetReturnType(0).representation()));
@@ -1521,7 +1540,7 @@
 
 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
   OperandGenerator g(this);
-  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+  const CallDescriptor* descriptor = CallDescriptorOf(node->op());
 
   FrameStateDescriptor* frame_state_descriptor = nullptr;
   if (descriptor->NeedsFrameState()) {
@@ -1589,10 +1608,8 @@
 
 void InstructionSelector::VisitTailCall(Node* node) {
   OperandGenerator g(this);
-  CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+  CallDescriptor const* descriptor = CallDescriptorOf(node->op());
   DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
-  DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
-  DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
 
   // TODO(turbofan): Relax restriction for stack parameters.
 
@@ -1635,6 +1652,9 @@
         case CallDescriptor::kCallJSFunction:
           opcode = kArchTailCallJSFunction;
           break;
+        case CallDescriptor::kCallAddress:
+          opcode = kArchTailCallAddress;
+          break;
         default:
           UNREACHABLE();
           return;
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 9c1cd4c..335099f 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -247,7 +247,6 @@
 #undef DECLARE_GENERATOR
 
   void VisitFinishRegion(Node* node);
-  void VisitGuard(Node* node);
   void VisitParameter(Node* node);
   void VisitIfException(Node* node);
   void VisitOsrValue(Node* node);
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index c757557..26aebca 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -127,12 +127,12 @@
       LocationOperand allocated = LocationOperand::cast(op);
       if (op.IsStackSlot()) {
         os << "[stack:" << LocationOperand::cast(op).index();
-      } else if (op.IsDoubleStackSlot()) {
-        os << "[double_stack:" << LocationOperand::cast(op).index();
+      } else if (op.IsFPStackSlot()) {
+        os << "[fp_stack:" << LocationOperand::cast(op).index();
       } else if (op.IsRegister()) {
         os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
       } else {
-        DCHECK(op.IsDoubleRegister());
+        DCHECK(op.IsFPRegister());
         os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
            << "|R";
       }
@@ -251,17 +251,16 @@
                  DoubleRegister::from_code(index).IsAllocatable());
 }
 
-
 Instruction::Instruction(InstructionCode opcode)
     : opcode_(opcode),
       bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
                  TempCountField::encode(0) | IsCallField::encode(false)),
-      reference_map_(nullptr) {
+      reference_map_(nullptr),
+      block_(nullptr) {
   parallel_moves_[0] = nullptr;
   parallel_moves_[1] = nullptr;
 }
 
-
 Instruction::Instruction(InstructionCode opcode, size_t output_count,
                          InstructionOperand* outputs, size_t input_count,
                          InstructionOperand* inputs, size_t temp_count,
@@ -271,7 +270,8 @@
                  InputCountField::encode(input_count) |
                  TempCountField::encode(temp_count) |
                  IsCallField::encode(false)),
-      reference_map_(nullptr) {
+      reference_map_(nullptr),
+      block_(nullptr) {
   parallel_moves_[0] = nullptr;
   parallel_moves_[1] = nullptr;
   size_t offset = 0;
@@ -335,7 +335,7 @@
 void ReferenceMap::RecordReference(const AllocatedOperand& op) {
   // Do not record arguments as pointers.
   if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
-  DCHECK(!op.IsDoubleRegister() && !op.IsDoubleStackSlot());
+  DCHECK(!op.IsFPRegister() && !op.IsFPStackSlot());
   reference_operands_.push_back(op);
 }
 
@@ -504,6 +504,27 @@
 
 Constant::Constant(int32_t v) : type_(kInt32), value_(v) {}
 
+Constant::Constant(RelocatablePtrConstantInfo info) {
+  if (info.type() == RelocatablePtrConstantInfo::kInt32) {
+    type_ = kInt32;
+  } else if (info.type() == RelocatablePtrConstantInfo::kInt64) {
+    type_ = kInt64;
+  } else {
+    UNREACHABLE();
+  }
+  value_ = info.value();
+  rmode_ = info.rmode();
+}
+
+Handle<HeapObject> Constant::ToHeapObject() const {
+  DCHECK_EQ(kHeapObject, type());
+  Handle<HeapObject> value(
+      bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
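+  // Cons strings are now flattened here, when code generation actually
+  // embeds the object, instead of at HeapConstant node creation (see the
+  // corresponding removal in js-graph.cc below).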
+  if (value->IsConsString()) {
+    value = String::Flatten(Handle<String>::cast(value), TENURED);
+  }
+  return value;
+}
 
 std::ostream& operator<<(std::ostream& os, const Constant& constant) {
   switch (constant.type()) {
@@ -603,7 +624,6 @@
   return instr_block;
 }
 
-
 InstructionBlocks* InstructionSequence::InstructionBlocksFor(
     Zone* zone, const Schedule* schedule) {
   InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
@@ -620,7 +640,7 @@
   return blocks;
 }
 
-void InstructionSequence::ValidateEdgeSplitForm() {
+void InstructionSequence::ValidateEdgeSplitForm() const {
   // Validate blocks are in edge-split form: no block with multiple successors
   // has an edge to a block (== a successor) with more than one predecessor.
   for (const InstructionBlock* block : instruction_blocks()) {
@@ -635,7 +655,7 @@
   }
 }
 
-void InstructionSequence::ValidateDeferredBlockExitPaths() {
+void InstructionSequence::ValidateDeferredBlockExitPaths() const {
   // A deferred block with more than one successor must have all its successors
   // deferred.
   for (const InstructionBlock* block : instruction_blocks()) {
@@ -646,7 +666,21 @@
   }
 }
 
-void InstructionSequence::ValidateSSA() {
+void InstructionSequence::ValidateDeferredBlockEntryPaths() const {
+  // If a deferred block has multiple predecessors, they must all be
+  // deferred. Otherwise, we can run into a situation where a range
+  // that spills only in deferred blocks inserts its spill in the block, but
+  // other ranges need moves inserted by ResolveControlFlow in the predecessors,
+  // which may clobber the register of this range.
+  for (const InstructionBlock* block : instruction_blocks()) {
+    if (!block->IsDeferred() || block->PredecessorCount() <= 1) continue;
+    for (RpoNumber predecessor_id : block->predecessors()) {
+      CHECK(InstructionBlockAt(predecessor_id)->IsDeferred());
+    }
+  }
+}
+
+void InstructionSequence::ValidateSSA() const {
   // TODO(mtrofin): We could use a local zone here instead.
   BitVector definitions(VirtualRegisterCount(), zone());
   for (const Instruction* instruction : *this) {
@@ -675,7 +709,6 @@
   }
 }
 
-
 InstructionSequence::InstructionSequence(Isolate* isolate,
                                          Zone* instruction_zone,
                                          InstructionBlocks* instruction_blocks)
@@ -683,7 +716,6 @@
       zone_(instruction_zone),
       instruction_blocks_(instruction_blocks),
       source_positions_(zone()),
-      block_starts_(zone()),
       constants_(ConstantMap::key_compare(),
                  ConstantMap::allocator_type(zone())),
       immediates_(zone()),
@@ -691,10 +723,8 @@
       next_virtual_register_(0),
       reference_maps_(zone()),
       representations_(zone()),
-      deoptimization_entries_(zone()) {
-  block_starts_.reserve(instruction_blocks_->size());
-}
-
+      deoptimization_entries_(zone()),
+      current_block_(nullptr) {}
 
 int InstructionSequence::NextVirtualRegister() {
   int virtual_register = next_virtual_register_++;
@@ -710,28 +740,31 @@
 
 
 void InstructionSequence::StartBlock(RpoNumber rpo) {
-  DCHECK(block_starts_.size() == rpo.ToSize());
-  InstructionBlock* block = InstructionBlockAt(rpo);
+  DCHECK_NULL(current_block_);
+  current_block_ = InstructionBlockAt(rpo);
   int code_start = static_cast<int>(instructions_.size());
-  block->set_code_start(code_start);
-  block_starts_.push_back(code_start);
+  current_block_->set_code_start(code_start);
 }
 
 
 void InstructionSequence::EndBlock(RpoNumber rpo) {
   int end = static_cast<int>(instructions_.size());
-  InstructionBlock* block = InstructionBlockAt(rpo);
-  if (block->code_start() == end) {  // Empty block.  Insert a nop.
+  DCHECK_EQ(current_block_->rpo_number(), rpo);
+  if (current_block_->code_start() == end) {  // Empty block.  Insert a nop.
     AddInstruction(Instruction::New(zone(), kArchNop));
     end = static_cast<int>(instructions_.size());
   }
-  DCHECK(block->code_start() >= 0 && block->code_start() < end);
-  block->set_code_end(end);
+  DCHECK(current_block_->code_start() >= 0 &&
+         current_block_->code_start() < end);
+  current_block_->set_code_end(end);
+  current_block_ = nullptr;
 }
 
 
 int InstructionSequence::AddInstruction(Instruction* instr) {
+  DCHECK_NOT_NULL(current_block_);
   int index = static_cast<int>(instructions_.size());
+  instr->set_block(current_block_);
   instructions_.push_back(instr);
   if (instr->NeedsReferenceMap()) {
     DCHECK(instr->reference_map() == nullptr);
@@ -746,18 +779,7 @@
 
 InstructionBlock* InstructionSequence::GetInstructionBlock(
     int instruction_index) const {
-  DCHECK(instruction_blocks_->size() == block_starts_.size());
-  auto begin = block_starts_.begin();
-  auto end = std::lower_bound(begin, block_starts_.end(), instruction_index);
-  // Post condition of std::lower_bound:
-  DCHECK(end == block_starts_.end() || *end >= instruction_index);
-  if (end == block_starts_.end() || *end > instruction_index) --end;
-  DCHECK(*end <= instruction_index);
-  size_t index = std::distance(begin, end);
-  InstructionBlock* block = instruction_blocks_->at(index);
-  DCHECK(block->code_start() <= instruction_index &&
-         instruction_index < block->code_end());
-  return block;
+  return instructions()[instruction_index]->block();
 }
 
 
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index a1fe494..851ba24 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -66,9 +66,13 @@
 
   inline bool IsAnyRegister() const;
   inline bool IsRegister() const;
+  inline bool IsFPRegister() const;
+  inline bool IsFloatRegister() const;
   inline bool IsDoubleRegister() const;
   inline bool IsSimd128Register() const;
   inline bool IsStackSlot() const;
+  inline bool IsFPStackSlot() const;
+  inline bool IsFloatStackSlot() const;
   inline bool IsDoubleStackSlot() const;
   inline bool IsSimd128StackSlot() const;
 
@@ -413,7 +417,7 @@
   }
 
   int index() const {
-    DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSimd128StackSlot());
+    DCHECK(IsStackSlot() || IsFPStackSlot());
     return static_cast<int64_t>(value_) >> IndexField::kShift;
   }
 
@@ -423,8 +427,16 @@
                                IndexField::kShift);
   }
 
+  FloatRegister GetFloatRegister() const {
+    DCHECK(IsFloatRegister());
+    return FloatRegister::from_code(static_cast<int64_t>(value_) >>
+                                    IndexField::kShift);
+  }
+
   DoubleRegister GetDoubleRegister() const {
-    DCHECK(IsDoubleRegister());
+    // TODO(bbudge) Tighten this test to IsDoubleRegister when all code
+    // generators are changed to use the correct Get*Register method.
+    DCHECK(IsFPRegister());
     return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
                                      IndexField::kShift);
   }
@@ -526,11 +538,23 @@
          !IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
-bool InstructionOperand::IsDoubleRegister() const {
+bool InstructionOperand::IsFPRegister() const {
   return IsAnyRegister() &&
          IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
+bool InstructionOperand::IsFloatRegister() const {
+  return IsAnyRegister() &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kFloat32;
+}
+
+bool InstructionOperand::IsDoubleRegister() const {
+  return IsAnyRegister() &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kFloat64;
+}
+
 bool InstructionOperand::IsSimd128Register() const {
   return IsAnyRegister() &&
          LocationOperand::cast(this)->representation() ==
@@ -544,13 +568,29 @@
          !IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
-bool InstructionOperand::IsDoubleStackSlot() const {
+bool InstructionOperand::IsFPStackSlot() const {
   return (IsAllocated() || IsExplicit()) &&
          LocationOperand::cast(this)->location_kind() ==
              LocationOperand::STACK_SLOT &&
          IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
+bool InstructionOperand::IsFloatStackSlot() const {
+  return (IsAllocated() || IsExplicit()) &&
+         LocationOperand::cast(this)->location_kind() ==
+             LocationOperand::STACK_SLOT &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kFloat32;
+}
+
+bool InstructionOperand::IsDoubleStackSlot() const {
+  return (IsAllocated() || IsExplicit()) &&
+         LocationOperand::cast(this)->location_kind() ==
+             LocationOperand::STACK_SLOT &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kFloat64;
+}
+
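
A standalone sketch of the new predicate split (the enum and helpers are
invented, and kSimd128 counting as floating point is an assumption made for
the model): IsFPRegister() accepts any floating-point representation, while
IsFloatRegister() and IsDoubleRegister() now select exactly kFloat32 and
kFloat64.

    #include <cassert>

    enum Rep { kWord32, kFloat32, kFloat64, kSimd128 };

    bool IsFloatingPoint(Rep r) {
      return r == kFloat32 || r == kFloat64 || r == kSimd128;
    }
    bool IsFP(Rep r) { return IsFloatingPoint(r); }
    bool IsFloat(Rep r) { return r == kFloat32; }
    bool IsDouble(Rep r) { return r == kFloat64; }

    int main() {
      assert(IsFP(kFloat32) && IsFloat(kFloat32) && !IsDouble(kFloat32));
      assert(IsFP(kFloat64) && IsDouble(kFloat64) && !IsFloat(kFloat64));
      assert(!IsFP(kWord32) && !IsFloat(kWord32) && !IsDouble(kWord32));
      return 0;
    }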
 bool InstructionOperand::IsSimd128StackSlot() const {
   return (IsAllocated() || IsExplicit()) &&
          LocationOperand::cast(this)->location_kind() ==
@@ -715,6 +755,8 @@
 
 std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
 
+class InstructionBlock;
+
 class Instruction final {
  public:
   size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
@@ -826,7 +868,8 @@
     return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
            arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
            arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
-           arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction;
+           arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction ||
+           arch_opcode() == ArchOpcode::kArchTailCallAddress;
   }
   bool IsThrow() const {
     return arch_opcode() == ArchOpcode::kArchThrowTerminator;
@@ -859,6 +902,15 @@
   ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
   ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
 
+  // The block back-pointer exists only for register allocation, to avoid
+  // searching for blocks from instruction indexes; it may be invalidated
+  // later by JumpThreading.
+  InstructionBlock* block() const { return block_; }
+  void set_block(InstructionBlock* block) {
+    DCHECK_NOT_NULL(block);
+    block_ = block;
+  }
+
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
@@ -879,6 +931,7 @@
   uint32_t bit_field_;
   ParallelMove* parallel_moves_[2];
   ReferenceMap* reference_map_;
+  InstructionBlock* block_;
   InstructionOperand operands_[1];
 
   DISALLOW_COPY_AND_ASSIGN(Instruction);
@@ -950,9 +1003,12 @@
   explicit Constant(Handle<HeapObject> obj)
       : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
   explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
+  explicit Constant(RelocatablePtrConstantInfo info);
 
   Type type() const { return type_; }
 
+  RelocInfo::Mode rmode() const { return rmode_; }
+
   int32_t ToInt32() const {
     DCHECK(type() == kInt32 || type() == kInt64);
     const int32_t value = static_cast<int32_t>(value_);
@@ -987,14 +1043,16 @@
     return RpoNumber::FromInt(static_cast<int>(value_));
   }
 
-  Handle<HeapObject> ToHeapObject() const {
-    DCHECK_EQ(kHeapObject, type());
-    return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
-  }
+  Handle<HeapObject> ToHeapObject() const;
 
  private:
   Type type_;
   int64_t value_;
+#if V8_TARGET_ARCH_32_BIT
+  RelocInfo::Mode rmode_ = RelocInfo::NONE32;
+#else
+  RelocInfo::Mode rmode_ = RelocInfo::NONE64;
+#endif
 };
 
 
@@ -1316,7 +1374,8 @@
   Immediates& immediates() { return immediates_; }
 
   ImmediateOperand AddImmediate(const Constant& constant) {
-    if (constant.type() == Constant::kInt32) {
+    if (constant.type() == Constant::kInt32 &&
+        RelocInfo::IsNone(constant.rmode())) {
       return ImmediateOperand(ImmediateOperand::INLINE, constant.ToInt32());
     }
     int index = static_cast<int>(immediates_.size());
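
A standalone model of the rule above (types invented): only a relocation-free
int32 constant may be stored inline in the operand; any constant carrying a
RelocInfo mode must go through the immediates table so the relocation can be
applied later.

    #include <cassert>
    #include <vector>

    enum RelocMode { kNone, kNeedsRelocation };
    struct Constant { int value; RelocMode rmode; };
    struct Immediate { bool inlined; int payload; };  // value, or table index

    Immediate AddImmediate(std::vector<Constant>* table, const Constant& c) {
      if (c.rmode == kNone) return {true, c.value};
      table->push_back(c);
      return {false, static_cast<int>(table->size()) - 1};
    }

    int main() {
      std::vector<Constant> table;
      assert(AddImmediate(&table, {42, kNone}).inlined);
      assert(!AddImmediate(&table, {4096, kNeedsRelocation}).inlined);
      return 0;
    }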
@@ -1374,9 +1433,10 @@
   void PrintBlock(const RegisterConfiguration* config, int block_id) const;
   void PrintBlock(int block_id) const;
 
-  void ValidateEdgeSplitForm();
-  void ValidateDeferredBlockExitPaths();
-  void ValidateSSA();
+  void ValidateEdgeSplitForm() const;
+  void ValidateDeferredBlockExitPaths() const;
+  void ValidateDeferredBlockEntryPaths() const;
+  void ValidateSSA() const;
 
  private:
   friend std::ostream& operator<<(std::ostream& os,
@@ -1388,7 +1448,6 @@
   Zone* const zone_;
   InstructionBlocks* const instruction_blocks_;
   SourcePositionMap source_positions_;
-  IntVector block_starts_;
   ConstantMap constants_;
   Immediates immediates_;
   InstructionDeque instructions_;
@@ -1397,6 +1456,9 @@
   ZoneVector<MachineRepresentation> representations_;
   DeoptimizationVector deoptimization_entries_;
 
+  // Only used while the instruction sequence is being built.
+  InstructionBlock* current_block_;
+
   DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
 };
 
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 8824a03..830a0de 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -79,8 +79,10 @@
   return result;
 }
 
-static int GetParameterCountAfterLowering(
+int Int64Lowering::GetParameterCountAfterLowering(
     Signature<MachineRepresentation>* signature) {
+  // GetParameterIndexAfterLowering(parameter_count) returns the parameter count
+  // after lowering.
   return GetParameterIndexAfterLowering(
       signature, static_cast<int>(signature->parameter_count()));
 }
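
A standalone sketch of the index mapping the comment refers to (names
invented): each i64 parameter lowers to two i32 halves, so a parameter's new
index is its old index plus the number of i64 parameters preceding it, and
passing the parameter count as the index yields the lowered count.

    #include <cassert>
    #include <vector>

    enum Rep { kWord32, kWord64 };

    int IndexAfterLowering(const std::vector<Rep>& params, int old_index) {
      int result = old_index;
      for (int i = 0; i < old_index; ++i) {
        if (params[i] == kWord64) ++result;  // an i64 occupies two slots
      }
      return result;
    }

    int main() {
      std::vector<Rep> params = {kWord64, kWord32, kWord64};  // (i64, i32, i64)
      assert(IndexAfterLowering(params, 1) == 2);  // shifted past one i64
      assert(IndexAfterLowering(params, 3) == 5);  // 3 params lower to 5
      return 0;
    }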
@@ -177,7 +179,9 @@
         NodeProperties::ChangeOp(node, store_op);
         ReplaceNode(node, node, high_node);
       } else {
-        DefaultLowering(node);
+        if (HasReplacementLow(node->InputAt(2))) {
+          node->ReplaceInput(2, GetReplacementLow(node->InputAt(2)));
+        }
       }
       break;
     }
@@ -223,7 +227,9 @@
       break;
     }
     case IrOpcode::kCall: {
-      CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+      // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+      CallDescriptor* descriptor =
+          const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
       if (DefaultLowering(node) ||
           (descriptor->ReturnCount() == 1 &&
            descriptor->GetReturnType(0) == MachineType::Int64())) {
@@ -262,9 +268,6 @@
       node->NullAllInputs();
       break;
     }
-    // todo(ahaas): I added a list of missing instructions here to make merging
-    // easier when I do them one by one.
-    // kExprI64Add:
     case IrOpcode::kInt64Add: {
       DCHECK(node->InputCount() == 2);
 
@@ -283,8 +286,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-
-    // kExprI64Sub:
     case IrOpcode::kInt64Sub: {
       DCHECK(node->InputCount() == 2);
 
@@ -303,7 +304,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    // kExprI64Mul:
     case IrOpcode::kInt64Mul: {
       DCHECK(node->InputCount() == 2);
 
@@ -322,11 +322,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    // kExprI64DivS:
-    // kExprI64DivU:
-    // kExprI64RemS:
-    // kExprI64RemU:
-    // kExprI64Ior:
     case IrOpcode::kWord64Or: {
       DCHECK(node->InputCount() == 2);
       Node* left = node->InputAt(0);
@@ -341,8 +336,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-
-    // kExprI64Xor:
     case IrOpcode::kWord64Xor: {
       DCHECK(node->InputCount() == 2);
       Node* left = node->InputAt(0);
@@ -357,7 +350,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    // kExprI64Shl:
     case IrOpcode::kWord64Shl: {
       // TODO(turbofan): if the shift count >= 32, then we can set the low word
       // of the output to 0 and just calculate the high word.
@@ -380,7 +372,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    // kExprI64ShrU:
     case IrOpcode::kWord64Shr: {
       // TODO(turbofan): if the shift count >= 32, then we can set the high
       // word of the output to 0 and just calculate the low word.
@@ -403,7 +394,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    // kExprI64ShrS:
     case IrOpcode::kWord64Sar: {
       // TODO(turbofan): if the shift count >= 32, then we can fill the high
       // word of the output with the sign bit and just calculate the low word.
@@ -426,7 +416,6 @@
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    // kExprI64Eq:
     case IrOpcode::kWord64Equal: {
       DCHECK(node->InputCount() == 2);
       Node* left = node->InputAt(0);
@@ -446,7 +435,6 @@
       ReplaceNode(node, replacement, nullptr);
       break;
     }
-    // kExprI64LtS:
     case IrOpcode::kInt64LessThan: {
       LowerComparison(node, machine()->Int32LessThan(),
                       machine()->Uint32LessThan());
@@ -467,8 +455,6 @@
                       machine()->Uint32LessThanOrEqual());
       break;
     }
-
-    // kExprI64SConvertI32:
     case IrOpcode::kChangeInt32ToInt64: {
       DCHECK(node->InputCount() == 1);
       Node* input = node->InputAt(0);
@@ -483,7 +469,6 @@
       node->NullAllInputs();
       break;
     }
-    // kExprI64UConvertI32: {
     case IrOpcode::kChangeUint32ToUint64: {
       DCHECK(node->InputCount() == 1);
       Node* input = node->InputAt(0);
@@ -494,7 +479,6 @@
       node->NullAllInputs();
       break;
     }
-    // kExprF64ReinterpretI64:
     case IrOpcode::kBitcastInt64ToFloat64: {
       DCHECK(node->InputCount() == 1);
       Node* input = node->InputAt(0);
@@ -523,7 +507,6 @@
       ReplaceNode(node, load, nullptr);
       break;
     }
-    // kExprI64ReinterpretF64:
     case IrOpcode::kBitcastFloat64ToInt64: {
       DCHECK(node->InputCount() == 1);
       Node* input = node->InputAt(0);
@@ -659,7 +642,6 @@
       }
       break;
     }
-    // kExprI64Clz:
     case IrOpcode::kWord64Clz: {
       DCHECK(node->InputCount() == 1);
       Node* input = node->InputAt(0);
@@ -678,7 +660,6 @@
       ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
       break;
     }
-    // kExprI64Ctz:
     case IrOpcode::kWord64Ctz: {
       DCHECK(node->InputCount() == 1);
       DCHECK(machine()->Word32Ctz().IsSupported());
@@ -698,7 +679,6 @@
       ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
       break;
     }
-    // kExprI64Popcnt:
     case IrOpcode::kWord64Popcnt: {
       DCHECK(node->InputCount() == 1);
       Node* input = node->InputAt(0);
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 7f6ef9a..054c421 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -23,6 +23,9 @@
 
   void LowerGraph();
 
+  static int GetParameterCountAfterLowering(
+      Signature<MachineRepresentation>* signature);
+
  private:
   enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
 
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 41f9c30..0d69a89 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -174,7 +174,7 @@
 // ES6 draft 08-24-14, section 20.2.2.17.
 Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchOne(Type::Number())) {
+  if (r.InputsMatchOne(Type::NumberOrUndefined())) {
     // Math.fround(a:number) -> TruncateFloat64ToFloat32(a)
     Node* value =
         graph()->NewNode(machine()->TruncateFloat64ToFloat32(), r.left());
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index 892dcc7..b3561e9 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -326,9 +326,8 @@
     }
 
     // Check that the {target} is still the {array_function}.
-    Node* check = effect =
-        graph()->NewNode(javascript()->StrictEqual(), target, array_function,
-                         context, effect, control);
+    Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+                                   array_function, context);
     control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                                effect, control);
 
@@ -344,15 +343,13 @@
           jsgraph()->Constant(handle(cell->value(), isolate()));
 
       // Check that the {target} is still the {target_function}.
-      Node* check = effect =
-          graph()->NewNode(javascript()->StrictEqual(), target, target_function,
-                           context, effect, control);
+      Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+                                     target_function, context);
       control = graph()->NewNode(common()->DeoptimizeUnless(), check,
                                  frame_state, effect, control);
 
       // Specialize the JSCallFunction node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
-      NodeProperties::ReplaceEffectInput(node, effect);
       NodeProperties::ReplaceControlInput(node, control);
 
       // Try to further reduce the JSCallFunction {node}.
@@ -454,9 +451,8 @@
     }
 
     // Check that the {target} is still the {array_function}.
-    Node* check = effect =
-        graph()->NewNode(javascript()->StrictEqual(), target, array_function,
-                         context, effect, control);
+    Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+                                   array_function, context);
     control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                                effect, control);
 
@@ -478,9 +474,8 @@
           jsgraph()->Constant(handle(cell->value(), isolate()));
 
       // Check that the {target} is still the {target_function}.
-      Node* check = effect =
-          graph()->NewNode(javascript()->StrictEqual(), target, target_function,
-                           context, effect, control);
+      Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+                                     target_function, context);
       control = graph()->NewNode(common()->DeoptimizeUnless(), check,
                                  frame_state, effect, control);
 
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index 2003363..16e1666 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -201,6 +201,8 @@
       return ReduceJSCreateArguments(node);
     case IrOpcode::kJSCreateArray:
       return ReduceJSCreateArray(node);
+    case IrOpcode::kJSCreateClosure:
+      return ReduceJSCreateClosure(node);
     case IrOpcode::kJSCreateIterResultObject:
       return ReduceJSCreateIterResultObject(node);
     case IrOpcode::kJSCreateLiteralArray:
@@ -278,6 +280,7 @@
   CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
   Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
   Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+  Node* const control = graph()->start();
   FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
 
   // Use the ArgumentsAccessStub for materializing both mapped and unmapped
@@ -291,38 +294,41 @@
             shared_info->has_duplicate_parameters()) {
           return NoChange();
         }
-        // TODO(bmeurer): Actually we don't need a frame state here.
         Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+        Operator::Properties properties = node->op()->properties();
         CallDescriptor* desc = Linkage::GetStubCallDescriptor(
             isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNeedsFrameState);
+            CallDescriptor::kNoFlags, properties);
         const Operator* new_op = common()->Call(desc);
         Node* stub_code = jsgraph()->HeapConstant(callable.code());
         node->InsertInput(graph()->zone(), 0, stub_code);
+        node->RemoveInput(3);  // Remove the frame state.
         NodeProperties::ChangeOp(node, new_op);
         return Changed(node);
       }
       case CreateArgumentsType::kUnmappedArguments: {
-        // TODO(bmeurer): Actually we don't need a frame state here.
         Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+        Operator::Properties properties = node->op()->properties();
         CallDescriptor* desc = Linkage::GetStubCallDescriptor(
             isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNeedsFrameState);
+            CallDescriptor::kNoFlags, properties);
         const Operator* new_op = common()->Call(desc);
         Node* stub_code = jsgraph()->HeapConstant(callable.code());
         node->InsertInput(graph()->zone(), 0, stub_code);
+        node->RemoveInput(3);  // Remove the frame state.
         NodeProperties::ChangeOp(node, new_op);
         return Changed(node);
       }
       case CreateArgumentsType::kRestParameter: {
-        // TODO(bmeurer): Actually we don't need a frame state here.
         Callable callable = CodeFactory::FastNewRestParameter(isolate());
+        Operator::Properties properties = node->op()->properties();
         CallDescriptor* desc = Linkage::GetStubCallDescriptor(
             isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNeedsFrameState);
+            CallDescriptor::kNoFlags, properties);
         const Operator* new_op = common()->Call(desc);
         Node* stub_code = jsgraph()->HeapConstant(callable.code());
         node->InsertInput(graph()->zone(), 0, stub_code);
+        node->RemoveInput(3);  // Remove the frame state.
         NodeProperties::ChangeOp(node, new_op);
         return Changed(node);
       }
@@ -335,7 +341,6 @@
       Handle<SharedFunctionInfo> shared;
       if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
       Node* const callee = NodeProperties::GetValueInput(node, 0);
-      Node* const control = NodeProperties::GetControlInput(node);
       Node* const context = NodeProperties::GetContextInput(node);
       Node* effect = NodeProperties::GetEffectInput(node);
       // TODO(mstarzinger): Duplicate parameters are not handled yet.
@@ -376,7 +381,6 @@
     } else if (type == CreateArgumentsType::kUnmappedArguments) {
       // Use inline allocation for all unmapped arguments objects within inlined
       // (i.e. non-outermost) frames, independent of the object size.
-      Node* const control = NodeProperties::GetControlInput(node);
       Node* const context = NodeProperties::GetContextInput(node);
       Node* effect = NodeProperties::GetEffectInput(node);
       // Choose the correct frame state and frame state info depending on
@@ -414,7 +418,6 @@
       int start_index = shared->internal_formal_parameter_count();
       // Use inline allocation for all unmapped arguments objects within inlined
       // (i.e. non-outermost) frames, independent of the object size.
-      Node* const control = NodeProperties::GetControlInput(node);
       Node* const context = NodeProperties::GetContextInput(node);
       Node* effect = NodeProperties::GetEffectInput(node);
       // Choose the correct frame state and frame state info depending on
@@ -471,6 +474,9 @@
   PretenureFlag pretenure = site->GetPretenureMode();
   ElementsKind elements_kind = site->GetElementsKind();
   DCHECK(IsFastElementsKind(elements_kind));
+  if (NodeProperties::GetType(length)->Max() > 0) {
+    elements_kind = GetHoleyElementsKind(elements_kind);
+  }
   dependencies()->AssumeTenuringDecision(site);
   dependencies()->AssumeTransitionStable(site);
 
@@ -540,6 +546,51 @@
   return NoChange();
 }
 
+Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
+  CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+  Handle<SharedFunctionInfo> shared = p.shared_info();
+
+  // Use inline allocation for functions that don't need literals cloning.
+  if (shared->num_literals() == 0) {
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+    Node* context = NodeProperties::GetContextInput(node);
+    Node* native_context = effect = graph()->NewNode(
+        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+        context, context, effect);
+    int function_map_index =
+        Context::FunctionMapIndex(shared->language_mode(), shared->kind());
+    Node* function_map = effect =
+        graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
+                         native_context, native_context, effect);
+    // Note that it is only safe to embed the raw entry point of the compile
+    // lazy stub into the code, because that stub is immortal and immovable.
+    Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
+        jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
+    Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+    Node* the_hole = jsgraph()->TheHoleConstant();
+    Node* undefined = jsgraph()->UndefinedConstant();
+    AllocationBuilder a(jsgraph(), effect, control);
+    STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
+    a.Allocate(JSFunction::kSize, p.pretenure());
+    a.Store(AccessBuilder::ForMap(), function_map);
+    a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
+    a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+    a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_fixed_array);
+    a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
+    a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+    a.Store(AccessBuilder::ForJSFunctionContext(), context);
+    a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
+    a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
+    RelaxControls(node);
+    a.FinishAndChange(node);
+    return Changed(node);
+  }
+
+  return NoChange();
+}
+
 Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
   Node* value = NodeProperties::GetValueInput(node, 0);
@@ -886,8 +937,9 @@
     Handle<Name> property_name(
         boilerplate_map->instance_descriptors()->GetKey(i), isolate());
     FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
-    FieldAccess access = {kTaggedBase, index.offset(), property_name,
-                          Type::Tagged(), MachineType::AnyTagged()};
+    FieldAccess access = {
+        kTaggedBase,    index.offset(),           property_name,
+        Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
     Node* value;
     if (boilerplate->IsUnboxedDoubleField(index)) {
       access.machine_type = MachineType::Float64();
@@ -905,18 +957,21 @@
         site_context->ExitScope(current_site, boilerplate_object);
       } else if (property_details.representation().IsDouble()) {
         // Allocate a mutable HeapNumber box and store the value into it.
-        Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
-        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-            isolate(), jsgraph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNoFlags, Operator::kNoThrow);
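+        // The BeginRegion/FinishRegion pair groups the allocation and its
+        // initializing stores into one atomic region, so the partially
+        // initialized box can never be observed.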
+        effect = graph()->NewNode(common()->BeginRegion(), effect);
         value = effect = graph()->NewNode(
-            common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-            jsgraph()->NoContextConstant(), effect, control);
+            simplified()->Allocate(NOT_TENURED),
+            jsgraph()->Constant(HeapNumber::kSize), effect, control);
+        effect = graph()->NewNode(
+            simplified()->StoreField(AccessBuilder::ForMap()), value,
+            jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+            effect, control);
         effect = graph()->NewNode(
             simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
             value, jsgraph()->Constant(
                        Handle<HeapNumber>::cast(boilerplate_value)->value()),
             effect, control);
+        value = effect =
+            graph()->NewNode(common()->FinishRegion(), value, effect);
       } else if (property_details.representation().IsSmi()) {
         // Ensure that value is stored as smi.
         value = boilerplate_value->IsUninitialized()
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index 52e7ec2..57b28af 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -45,6 +45,7 @@
   Reduction ReduceJSCreate(Node* node);
   Reduction ReduceJSCreateArguments(Node* node);
   Reduction ReduceJSCreateArray(Node* node);
+  Reduction ReduceJSCreateClosure(Node* node);
   Reduction ReduceJSCreateIterResultObject(Node* node);
   Reduction ReduceJSCreateLiteral(Node* node);
   Reduction ReduceJSCreateFunctionContext(Node* node);
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 1f12579..105298e 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -28,10 +28,7 @@
                    : CallDescriptor::kNoFlags;
 }
 
-
-JSGenericLowering::JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph)
-    : is_typing_enabled_(is_typing_enabled), jsgraph_(jsgraph) {}
-
+JSGenericLowering::JSGenericLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
 
 JSGenericLowering::~JSGenericLowering() {}
 
@@ -44,40 +41,12 @@
       break;
     JS_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
-    case IrOpcode::kBranch:
-    case IrOpcode::kDeoptimizeIf:
-    case IrOpcode::kDeoptimizeUnless:
-      // TODO(mstarzinger): If typing is enabled then simplified lowering will
-      // have inserted the correct ChangeBoolToBit, otherwise we need to perform
-      // poor-man's representation inference here and insert manual change.
-      if (!is_typing_enabled_) {
-        Node* condition = node->InputAt(0);
-        Node* test = graph()->NewNode(machine()->WordEqual(), condition,
-                                      jsgraph()->TrueConstant());
-        node->ReplaceInput(0, test);
-      }
-      // Fall-through.
     default:
       // Nothing to see.
       return NoChange();
   }
   return Changed(node);
 }
-
-#define REPLACE_BINARY_OP_IC_CALL(Op, token)                                \
-  void JSGenericLowering::Lower##Op(Node* node) {                           \
-    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);           \
-    ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token),    \
-                        CallDescriptor::kPatchableCallSiteWithNop | flags); \
-  }
-REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
-REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
-REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
-REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
-REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
-REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
-#undef REPLACE_BINARY_OP_IC_CALL
-
 #define REPLACE_RUNTIME_CALL(op, fun)             \
   void JSGenericLowering::Lower##op(Node* node) { \
     ReplaceWithRuntimeCall(node, fun);            \
@@ -95,18 +64,22 @@
   }
 REPLACE_STUB_CALL(Add)
 REPLACE_STUB_CALL(Subtract)
+REPLACE_STUB_CALL(Multiply)
+REPLACE_STUB_CALL(Divide)
+REPLACE_STUB_CALL(Modulus)
 REPLACE_STUB_CALL(BitwiseAnd)
 REPLACE_STUB_CALL(BitwiseOr)
 REPLACE_STUB_CALL(BitwiseXor)
+REPLACE_STUB_CALL(ShiftLeft)
+REPLACE_STUB_CALL(ShiftRight)
+REPLACE_STUB_CALL(ShiftRightLogical)
 REPLACE_STUB_CALL(LessThan)
 REPLACE_STUB_CALL(LessThanOrEqual)
 REPLACE_STUB_CALL(GreaterThan)
 REPLACE_STUB_CALL(GreaterThanOrEqual)
+REPLACE_STUB_CALL(HasProperty)
 REPLACE_STUB_CALL(Equal)
 REPLACE_STUB_CALL(NotEqual)
-REPLACE_STUB_CALL(StrictEqual)
-REPLACE_STUB_CALL(StrictNotEqual)
-REPLACE_STUB_CALL(ToBoolean)
 REPLACE_STUB_CALL(ToInteger)
 REPLACE_STUB_CALL(ToLength)
 REPLACE_STUB_CALL(ToNumber)
@@ -117,7 +90,12 @@
 
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags) {
-  Operator::Properties properties = node->op()->properties();
+  ReplaceWithStubCall(node, callable, flags, node->op()->properties());
+}
+
+void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+                                            CallDescriptor::Flags flags,
+                                            Operator::Properties properties) {
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), zone(), callable.descriptor(), 0, flags, properties);
   Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -143,11 +121,32 @@
   NodeProperties::ChangeOp(node, common()->Call(desc));
 }
 
+void JSGenericLowering::LowerJSStrictEqual(Node* node) {
+  Callable callable = CodeFactory::StrictEqual(isolate());
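+  // The stub has no visible side effects: give the call the graph start as
+  // its effect input and mark it kEliminatable, so unused comparisons can
+  // be removed.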
+  node->AppendInput(zone(), graph()->start());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
+                      Operator::kEliminatable);
+}
+
+void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
+  Callable callable = CodeFactory::StrictNotEqual(isolate());
+  node->AppendInput(zone(), graph()->start());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
+                      Operator::kEliminatable);
+}
+
+void JSGenericLowering::LowerJSToBoolean(Node* node) {
+  Callable callable = CodeFactory::ToBoolean(isolate());
+  node->AppendInput(zone(), graph()->start());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+                      Operator::kEliminatable);
+}
 
 void JSGenericLowering::LowerJSTypeOf(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   Callable callable = CodeFactory::Typeof(isolate());
-  ReplaceWithStubCall(node, callable, flags);
+  node->AppendInput(zone(), graph()->start());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+                      Operator::kEliminatable);
 }
 
 
@@ -344,11 +343,6 @@
 }
 
 
-void JSGenericLowering::LowerJSHasProperty(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kHasProperty);
-}
-
-
 void JSGenericLowering::LowerJSInstanceOf(Node* node) {
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   Callable callable = CodeFactory::InstanceOf(isolate());
@@ -686,9 +680,6 @@
 }
 
 
-void JSGenericLowering::LowerJSYield(Node* node) { UNIMPLEMENTED(); }
-
-
 void JSGenericLowering::LowerJSStackCheck(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index 5ee759b..38ee431 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -24,7 +24,7 @@
 // Lowers JS-level operators to runtime and IC calls in the "generic" case.
 class JSGenericLowering final : public Reducer {
  public:
-  JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph);
+  explicit JSGenericLowering(JSGraph* jsgraph);
   ~JSGenericLowering() final;
 
   Reduction Reduce(Node* node) final;
@@ -37,6 +37,8 @@
 
   // Helpers to replace existing nodes with a generic call.
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
+  void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
+                           Operator::Properties properties);
   void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
 
   Zone* zone() const;
@@ -47,7 +49,6 @@
   MachineOperatorBuilder* machine() const;
 
  private:
-  bool const is_typing_enabled_;
   JSGraph* const jsgraph_;
 };
 
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index d8c9f17..81ea1ad 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -74,6 +74,7 @@
   // properties of the global object here (represented as PropertyCell).
   LookupIterator it(global_object, name, LookupIterator::OWN);
   if (it.state() != LookupIterator::DATA) return NoChange();
+  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
   Handle<PropertyCell> property_cell = it.GetPropertyCell();
   PropertyDetails property_details = property_cell->property_details();
   Handle<Object> property_cell_value(property_cell->value(), isolate());
@@ -154,6 +155,7 @@
   // properties of the global object here (represented as PropertyCell).
   LookupIterator it(global_object, name, LookupIterator::OWN);
   if (it.state() != LookupIterator::DATA) return NoChange();
+  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
   Handle<PropertyCell> property_cell = it.GetPropertyCell();
   PropertyDetails property_details = property_cell->property_details();
   Handle<Object> property_cell_value(property_cell->value(), isolate());
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 98ca7aa..229169f 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -14,6 +14,15 @@
 #define CACHED(name, expr) \
   cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
 
+Node* JSGraph::AllocateInNewSpaceStubConstant() {
+  return CACHED(kAllocateInNewSpaceStubConstant,
+                HeapConstant(isolate()->builtins()->AllocateInNewSpace()));
+}
+
+Node* JSGraph::AllocateInOldSpaceStubConstant() {
+  return CACHED(kAllocateInOldSpaceStubConstant,
+                HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
+}
 
 Node* JSGraph::CEntryStubConstant(int result_size) {
   if (result_size == 1) {
@@ -29,11 +38,21 @@
                 HeapConstant(factory()->empty_fixed_array()));
 }
 
+Node* JSGraph::HeapNumberMapConstant() {
+  return CACHED(kHeapNumberMapConstant,
+                HeapConstant(factory()->heap_number_map()));
+}
+
 Node* JSGraph::OptimizedOutConstant() {
   return CACHED(kOptimizedOutConstant,
                 HeapConstant(factory()->optimized_out()));
 }
 
+Node* JSGraph::StaleRegisterConstant() {
+  return CACHED(kStaleRegisterConstant,
+                HeapConstant(factory()->stale_register()));
+}
+
 Node* JSGraph::UndefinedConstant() {
   return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
 }
@@ -76,9 +95,6 @@
 
 
 Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
-  if (value->IsConsString()) {
-    value = String::Flatten(Handle<String>::cast(value), TENURED);
-  }
   Node** loc = cache_.FindHeapConstant(value);
   if (*loc == nullptr) {
     *loc = graph()->NewNode(common()->HeapConstant(value));
@@ -139,6 +155,28 @@
   return *loc;
 }
 
+Node* JSGraph::RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
+  Node** loc = cache_.FindRelocatableInt32Constant(value);
+  if (*loc == nullptr) {
+    *loc = graph()->NewNode(common()->RelocatableInt32Constant(value, rmode));
+  }
+  return *loc;
+}
+
+Node* JSGraph::RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
+  Node** loc = cache_.FindRelocatableInt64Constant(value);
+  if (*loc == nullptr) {
+    *loc = graph()->NewNode(common()->RelocatableInt64Constant(value, rmode));
+  }
+  return *loc;
+}
+
+Node* JSGraph::RelocatableIntPtrConstant(intptr_t value,
+                                         RelocInfo::Mode rmode) {
+  return kPointerSize == 8
+             ? RelocatableInt64Constant(value, rmode)
+             : RelocatableInt32Constant(static_cast<int>(value), rmode);
+}
 
 Node* JSGraph::NumberConstant(double value) {
   Node** loc = cache_.FindNumberConstant(value);
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 06e8030..e772da8 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -39,9 +39,13 @@
   }
 
   // Canonicalized global constants.
+  Node* AllocateInNewSpaceStubConstant();
+  Node* AllocateInOldSpaceStubConstant();
   Node* CEntryStubConstant(int result_size);
   Node* EmptyFixedArrayConstant();
+  Node* HeapNumberMapConstant();
   Node* OptimizedOutConstant();
+  Node* StaleRegisterConstant();
   Node* UndefinedConstant();
   Node* TheHoleConstant();
   Node* TrueConstant();
@@ -96,6 +100,10 @@
     return IntPtrConstant(bit_cast<intptr_t>(value));
   }
 
+  Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode);
+  Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode);
+  Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
+
   // Creates a Float32Constant node, usually canonicalized.
   Node* Float32Constant(float value);
 
@@ -135,9 +143,13 @@
 
  private:
   enum CachedNode {
+    kAllocateInNewSpaceStubConstant,
+    kAllocateInOldSpaceStubConstant,
     kCEntryStubConstant,
     kEmptyFixedArrayConstant,
+    kHeapNumberMapConstant,
     kOptimizedOutConstant,
+    kStaleRegisterConstant,
     kUndefinedConstant,
     kTheHoleConstant,
     kTrueConstant,
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index e3254bd..5c01ff3 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -414,7 +414,7 @@
 
   Zone zone(info_->isolate()->allocator());
   ParseInfo parse_info(&zone, function);
-  CompilationInfo info(&parse_info);
+  CompilationInfo info(&parse_info, function);
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
 
   if (!Compiler::ParseAndAnalyze(info.parse_info())) {
@@ -519,7 +519,7 @@
   // in that frame state though, as the conversion of the receiver can be repeated
   // any number of times, it's not observable.
   if (node->opcode() == IrOpcode::kJSCallFunction &&
-      is_sloppy(info.language_mode()) && !shared_info->native()) {
+      is_sloppy(parse_info.language_mode()) && !shared_info->native()) {
     const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* convert = jsgraph_->graph()->NewNode(
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 034ee6f..70bcda5 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -86,8 +86,6 @@
       return ReduceNewObject(node);
     case Runtime::kInlineGetSuperConstructor:
       return ReduceGetSuperConstructor(node);
-    case Runtime::kInlineGetOrdinaryHasInstance:
-      return ReduceGetOrdinaryHasInstance(node);
     default:
       break;
   }
@@ -139,7 +137,7 @@
 
 Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
   // Tell the compiler to assume number input.
-  Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+  Node* renamed = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
                                    node->InputAt(0), graph()->start());
   node->ReplaceInput(0, renamed);
   return Change(node, machine()->Float64ExtractHighWord32());
@@ -148,7 +146,7 @@
 
 Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
   // Tell the compiler to assume number input.
-  Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+  Node* renamed = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
                                    node->InputAt(0), graph()->start());
   node->ReplaceInput(0, renamed);
   return Change(node, machine()->Float64ExtractLowWord32());
@@ -397,15 +395,7 @@
 }
 
 Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
-  Node* constructor = NodeProperties::GetValueInput(node, 0);
-  Node* new_target = NodeProperties::GetValueInput(node, 1);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  Node* value = graph()->NewNode(javascript()->Create(), constructor,
-                                 new_target, context, frame_state, effect);
-  ReplaceWithValue(node, value, value);
-  return Replace(value);
+  return Change(node, CodeFactory::FastNewObject(isolate()), 0);
 }
 
 Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
@@ -419,17 +409,6 @@
                 active_function_map, effect, control);
 }
 
-Reduction JSIntrinsicLowering::ReduceGetOrdinaryHasInstance(Node* node) {
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  return Change(node, javascript()->LoadContext(
-                          0, Context::ORDINARY_HAS_INSTANCE_INDEX, true),
-                native_context, context, effect);
-}
-
 Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
                                       Node* b) {
   RelaxControls(node);
@@ -466,12 +445,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
-  ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect);
-  return Changed(node);
-}
-
-
 Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
                                       int stack_parameter_count) {
   CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
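
Several hunks in this file (and in js-native-context-specialization.cc below)
swap common()->Guard for simplified()->TypeGuard. Both act as retyping-only
nodes. A toy illustration, with an integer id standing in for V8's Type
lattice, under the assumption that the guard itself generates no code:

    // Toy value with an explicit static type id standing in for V8's Type*.
    struct Value {
      int type_id;
      double payload;
    };

    // A TypeGuard is, in effect, a retyping-only node: the payload is
    // untouched; it merely lets later phases assume the narrower type
    // established by the dominating check (e.g. the DeoptimizeUnless emitted
    // just before it in the hunks above).
    Value ApplyTypeGuard(Value v, int asserted_type_id) {
      v.type_id = asserted_type_id;
      return v;
    }
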
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index a43ed01..59e6f49 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -63,14 +63,12 @@
   Reduction ReduceCall(Node* node);
   Reduction ReduceNewObject(Node* node);
   Reduction ReduceGetSuperConstructor(Node* node);
-  Reduction ReduceGetOrdinaryHasInstance(Node* node);
 
   Reduction Change(Node* node, const Operator* op);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
                    Node* d);
-  Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
   Reduction Change(Node* node, Callable const& callable,
                    int stack_parameter_count);
 
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index d1353d2..fbc064c 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -157,13 +157,7 @@
     // Perform map check on {receiver}.
     Type* receiver_type = access_info.receiver_type();
     if (receiver_type->Is(Type::String())) {
-      // Emit an instance type check for strings.
-      Node* receiver_instance_type = this_effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-          receiver_map, this_effect, fallthrough_control);
-      Node* check =
-          graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
-                           jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+      Node* check = graph()->NewNode(simplified()->ObjectIsString(), receiver);
       if (j == access_infos.size() - 1) {
         this_control =
             graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
@@ -270,7 +264,7 @@
                                    1 << JSArrayBuffer::WasNeutered::kShift)),
               jsgraph()->Int32Constant(0));
           this_control =
-              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                                this_effect, this_control);
           break;
         }
@@ -285,8 +279,9 @@
             simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
             this_storage, this_effect, this_control);
       }
-      FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
-                                  field_type, MachineType::AnyTagged()};
+      FieldAccess field_access = {
+          kTaggedBase, field_index.offset(),     name,
+          field_type,  MachineType::AnyTagged(), kFullWriteBarrier};
       if (access_mode == AccessMode::kLoad) {
         if (field_type->Is(Type::UntaggedFloat64())) {
           if (!field_index.is_inobject() || field_index.is_hidden_field() ||
@@ -310,26 +305,28 @@
           this_control =
               graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                                this_effect, this_control);
-          this_value = graph()->NewNode(common()->Guard(Type::Number()),
+          this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
                                         this_value, this_control);
 
           if (!field_index.is_inobject() || field_index.is_hidden_field() ||
               !FLAG_unbox_double_fields) {
             if (access_info.HasTransitionMap()) {
               // Allocate a MutableHeapNumber for the new property.
-              Callable callable =
-                  CodeFactory::AllocateMutableHeapNumber(isolate());
-              CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-                  isolate(), jsgraph()->zone(), callable.descriptor(), 0,
-                  CallDescriptor::kNoFlags, Operator::kNoThrow);
-              Node* this_box = this_effect = graph()->NewNode(
-                  common()->Call(desc),
-                  jsgraph()->HeapConstant(callable.code()),
-                  jsgraph()->NoContextConstant(), this_effect, this_control);
+              this_effect =
+                  graph()->NewNode(common()->BeginRegion(), this_effect);
+              Node* this_box = this_effect =
+                  graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                                   jsgraph()->Constant(HeapNumber::kSize),
+                                   this_effect, this_control);
+              this_effect = graph()->NewNode(
+                  simplified()->StoreField(AccessBuilder::ForMap()), this_box,
+                  jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+                  this_effect, this_control);
               this_effect = graph()->NewNode(
                   simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
                   this_box, this_value, this_effect, this_control);
-              this_value = this_box;
+              this_value = this_effect = graph()->NewNode(
+                  common()->FinishRegion(), this_box, this_effect);
 
               field_access.type = Type::TaggedPointer();
             } else {
@@ -351,8 +348,9 @@
           this_control =
               graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
                                this_effect, this_control);
-          this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
-                                        this_value, this_control);
+          this_value =
+              graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
+                               this_value, this_control);
         } else if (field_type->Is(Type::TaggedPointer())) {
           Node* check =
               graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
@@ -431,25 +429,29 @@
     AccessMode access_mode, LanguageMode language_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
          node->opcode() == IrOpcode::kJSStoreNamed);
+  Node* const receiver = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
 
   // Check if the {nexus} reports type feedback for the IC.
   if (nexus.IsUninitialized()) {
     if ((flags() & kDeoptimizationEnabled) &&
         (flags() & kBailoutOnUninitialized)) {
-      // TODO(turbofan): Implement all eager bailout points correctly in
-      // the graph builder.
-      Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-      if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
-        return ReduceSoftDeoptimize(node);
-      }
+      return ReduceSoftDeoptimize(node);
     }
     return NoChange();
   }
 
   // Extract receiver maps from the IC using the {nexus}.
   MapHandleList receiver_maps;
-  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
-  DCHECK_LT(0, receiver_maps.length());
+  if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+    return NoChange();
+  } else if (receiver_maps.length() == 0) {
+    if ((flags() & kDeoptimizationEnabled) &&
+        (flags() & kBailoutOnUninitialized)) {
+      return ReduceSoftDeoptimize(node);
+    }
+    return NoChange();
+  }
 
   // Try to lower the named access based on the {receiver_maps}.
   return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
@@ -460,8 +462,33 @@
 Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
   NamedAccess const& p = NamedAccessOf(node->op());
+  Node* const receiver = NodeProperties::GetValueInput(node, 0);
   Node* const value = jsgraph()->Dead();
 
+  // Check if we have a constant receiver.
+  HeapObjectMatcher m(receiver);
+  if (m.HasValue()) {
+    // Optimize "prototype" property of functions.
+    if (m.Value()->IsJSFunction() &&
+        p.name().is_identical_to(factory()->prototype_string())) {
+      Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+      if (function->has_initial_map()) {
+        // We need to add a code dependency on the initial map of the
+        // {function} in order to be notified about changes to the
+        // "prototype" of {function}, so it doesn't make sense to
+        // continue unless deoptimization is enabled.
+        if (flags() & kDeoptimizationEnabled) {
+          Handle<Map> initial_map(function->initial_map(), isolate());
+          dependencies()->AssumeInitialMapCantChange(initial_map);
+          Handle<Object> prototype(initial_map->prototype(), isolate());
+          Node* value = jsgraph()->Constant(prototype);
+          ReplaceWithValue(node, value);
+          return Replace(value);
+        }
+      }
+    }
+  }
+
   // Extract receiver maps from the LOAD_IC using the LoadICNexus.
   if (!p.feedback().IsValid()) return NoChange();
   LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
@@ -664,8 +691,8 @@
           graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
       this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
                                       frame_state, this_effect, this_control);
-      this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
-                                    this_control);
+      this_index = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
+                                    this_index, this_control);
     }
 
     // Convert the {index} to an unsigned32 value and check if the result is
@@ -729,7 +756,8 @@
       element_type = type_cache_.kSmi;
     }
     ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
-                                    element_type, element_machine_type};
+                                    element_type, element_machine_type,
+                                    kFullWriteBarrier};
 
     // Access the actual element.
     // TODO(bmeurer): Refactor this into separate methods or even a separate
@@ -786,8 +814,8 @@
         }
         // Rename the result to represent the actual type (not polluted by the
         // hole).
-        this_value = graph()->NewNode(common()->Guard(element_type), this_value,
-                                      this_control);
+        this_value = graph()->NewNode(simplified()->TypeGuard(element_type),
+                                      this_value, this_control);
       } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
         // Perform the hole check on the result.
         Node* check =
@@ -820,14 +848,14 @@
         Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
         this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
                                         frame_state, this_effect, this_control);
-        this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+        this_value = graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
                                       this_value, this_control);
       } else if (IsFastDoubleElementsKind(elements_kind)) {
         Node* check =
             graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
         this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
                                         frame_state, this_effect, this_control);
-        this_value = graph()->NewNode(common()->Guard(Type::Number()),
+        this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
                                       this_value, this_control);
       }
       this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
@@ -873,25 +901,29 @@
     KeyedAccessStoreMode store_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty);
+  Node* const receiver = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
 
   // Check if the {nexus} reports type feedback for the IC.
   if (nexus.IsUninitialized()) {
     if ((flags() & kDeoptimizationEnabled) &&
         (flags() & kBailoutOnUninitialized)) {
-      // TODO(turbofan): Implement all eager bailout points correctly in
-      // the graph builder.
-      Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-      if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
-        return ReduceSoftDeoptimize(node);
-      }
+      return ReduceSoftDeoptimize(node);
     }
     return NoChange();
   }
 
   // Extract receiver maps from the {nexus}.
   MapHandleList receiver_maps;
-  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
-  DCHECK_LT(0, receiver_maps.length());
+  if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+    return NoChange();
+  } else if (receiver_maps.length() == 0) {
+    if ((flags() & kDeoptimizationEnabled) &&
+        (flags() & kBailoutOnUninitialized)) {
+      return ReduceSoftDeoptimize(node);
+    }
+    return NoChange();
+  }
 
   // Optimize access for constant {index}.
   HeapObjectMatcher mindex(index);
@@ -995,6 +1027,84 @@
   }
 }
 
+bool JSNativeContextSpecialization::ExtractReceiverMaps(
+    Node* receiver, Node* effect, FeedbackNexus const& nexus,
+    MapHandleList* receiver_maps) {
+  DCHECK_EQ(0, receiver_maps->length());
+  // See if we can infer a concrete type for the {receiver}.
+  Handle<Map> receiver_map;
+  if (InferReceiverMap(receiver, effect).ToHandle(&receiver_map)) {
+    // We can assume that the {receiver} still has the inferred {receiver_map}.
+    receiver_maps->Add(receiver_map);
+    return true;
+  }
+  // Try to extract some maps from the {nexus}.
+  if (nexus.ExtractMaps(receiver_maps) != 0) {
+    // Try to filter out impossible candidates based on the inferred root map.
+    if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
+      for (int i = receiver_maps->length(); --i >= 0;) {
+        if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
+          receiver_maps->Remove(i);
+        }
+      }
+    }
+    return true;
+  }
+  return false;
+}
+
+MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
+                                                                 Node* effect) {
+  NodeMatcher m(receiver);
+  if (m.IsJSCreate()) {
+    HeapObjectMatcher mtarget(m.InputAt(0));
+    HeapObjectMatcher mnewtarget(m.InputAt(1));
+    if (mtarget.HasValue() && mnewtarget.HasValue()) {
+      Handle<JSFunction> constructor =
+          Handle<JSFunction>::cast(mtarget.Value());
+      if (constructor->has_initial_map()) {
+        Handle<Map> initial_map(constructor->initial_map(), isolate());
+        if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+          // Walk up the {effect} chain to see if the {receiver} is the
+          // dominating effect and there's no other observable write in
+          // between.
+          while (true) {
+            if (receiver == effect) return initial_map;
+            if (!effect->op()->HasProperty(Operator::kNoWrite) ||
+                effect->op()->EffectInputCount() != 1) {
+              break;
+            }
+            effect = NodeProperties::GetEffectInput(effect);
+          }
+        }
+      }
+    }
+  }
+  return MaybeHandle<Map>();
+}
+
+MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
+    Node* receiver) {
+  HeapObjectMatcher m(receiver);
+  if (m.HasValue()) {
+    return handle(m.Value()->map()->FindRootMap(), isolate());
+  } else if (m.IsJSCreate()) {
+    HeapObjectMatcher mtarget(m.InputAt(0));
+    HeapObjectMatcher mnewtarget(m.InputAt(1));
+    if (mtarget.HasValue() && mnewtarget.HasValue()) {
+      Handle<JSFunction> constructor =
+          Handle<JSFunction>::cast(mtarget.Value());
+      if (constructor->has_initial_map()) {
+        Handle<Map> initial_map(constructor->initial_map(), isolate());
+        if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+          DCHECK_EQ(*initial_map, initial_map->FindRootMap());
+          return initial_map;
+        }
+      }
+    }
+  }
+  return MaybeHandle<Map>();
+}
 
 MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
     Node* node) {
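
InferReceiverMap above only trusts the constructor's initial map when the
JSCreate is the nearest dominating effect and nothing in between may write
(the real loop also stops at effect merges, i.e. nodes with more than one
effect input). A standalone sketch of that walk over a toy effect chain:

    // Toy effect node; no_write stands in for Operator::kNoWrite.
    struct EffectNode {
      bool no_write;             // true if this node cannot observably write
      EffectNode* effect_input;  // single incoming effect edge, nullptr at start
    };

    // True if {create} dominates {use_effect} on the effect chain with no
    // intervening node that may write, i.e. the condition under which the
    // receiver provably still has the initial map it was allocated with.
    bool CreateDominatesWithoutWrites(EffectNode* create, EffectNode* use_effect) {
      for (EffectNode* e = use_effect; e != nullptr; e = e->effect_input) {
        if (e == create) return true;
        if (!e->no_write) return false;  // a write could have transitioned the map
      }
      return false;
    }
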
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 5562c6e..7d43bfb 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -85,6 +85,20 @@
                               Handle<Context> native_context,
                               Handle<JSObject> holder);
 
+  // Extract receiver maps from {nexus} and filter based on {receiver} if
+  // possible.
+  bool ExtractReceiverMaps(Node* receiver, Node* effect,
+                           FeedbackNexus const& nexus,
+                           MapHandleList* receiver_maps);
+
+  // Try to infer a map for the given {receiver} at the current {effect}.
+  // If a map is returned, the {receiver} is guaranteed to have that map at
+  // this point in the program (identified by {effect}).
+  MaybeHandle<Map> InferReceiverMap(Node* receiver, Node* effect);
+  // Try to infer a root map for the {receiver} independent of the current
+  // program location.
+  MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
+
   // Retrieve the native context from the given {node} if known.
   MaybeHandle<Context> GetNativeContext(Node* node);
 
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 98e090b..dfbe742 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -379,8 +379,8 @@
 #define CACHED_OP_LIST(V)                                  \
   V(Equal, Operator::kNoProperties, 2, 1)                  \
   V(NotEqual, Operator::kNoProperties, 2, 1)               \
-  V(StrictEqual, Operator::kNoThrow, 2, 1)                 \
-  V(StrictNotEqual, Operator::kNoThrow, 2, 1)              \
+  V(StrictEqual, Operator::kPure, 2, 1)                    \
+  V(StrictNotEqual, Operator::kPure, 2, 1)                 \
   V(LessThan, Operator::kNoProperties, 2, 1)               \
   V(GreaterThan, Operator::kNoProperties, 2, 1)            \
   V(LessThanOrEqual, Operator::kNoProperties, 2, 1)        \
@@ -389,13 +389,12 @@
   V(ToLength, Operator::kNoProperties, 1, 1)               \
   V(ToName, Operator::kNoProperties, 1, 1)                 \
   V(ToNumber, Operator::kNoProperties, 1, 1)               \
-  V(ToObject, Operator::kNoProperties, 1, 1)               \
+  V(ToObject, Operator::kFoldable, 1, 1)                   \
   V(ToString, Operator::kNoProperties, 1, 1)               \
-  V(Yield, Operator::kNoProperties, 1, 1)                  \
   V(Create, Operator::kEliminatable, 2, 1)                 \
   V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
   V(HasProperty, Operator::kNoProperties, 2, 1)            \
-  V(TypeOf, Operator::kEliminatable, 1, 1)                 \
+  V(TypeOf, Operator::kPure, 1, 1)                         \
   V(InstanceOf, Operator::kNoProperties, 2, 1)             \
   V(ForInDone, Operator::kPure, 2, 1)                      \
   V(ForInNext, Operator::kNoProperties, 4, 1)              \
@@ -541,11 +540,11 @@
 
 const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  return new (zone()) Operator1<ToBooleanHints>(        //--
-      IrOpcode::kJSToBoolean, Operator::kEliminatable,  // opcode
-      "JSToBoolean",                                    // name
-      1, 1, 0, 1, 1, 0,                                 // inputs/outputs
-      hints);                                           // parameter
+  return new (zone()) Operator1<ToBooleanHints>(  //--
+      IrOpcode::kJSToBoolean, Operator::kPure,    // opcode
+      "JSToBoolean",                              // name
+      1, 0, 0, 1, 0, 0,                           // inputs/outputs
+      hints);                                     // parameter
 }
 
 const Operator* JSOperatorBuilder::CallFunction(
@@ -707,11 +706,11 @@
 
 
 const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
-  return new (zone()) Operator1<CreateArgumentsType>(    // --
-      IrOpcode::kJSCreateArguments, Operator::kNoThrow,  // opcode
-      "JSCreateArguments",                               // name
-      1, 1, 1, 1, 1, 0,                                  // counts
-      type);                                             // parameter
+  return new (zone()) Operator1<CreateArgumentsType>(         // --
+      IrOpcode::kJSCreateArguments, Operator::kEliminatable,  // opcode
+      "JSCreateArguments",                                    // name
+      1, 1, 0, 1, 1, 0,                                       // counts
+      type);                                                  // parameter
 }
 
 
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index eb323c9..750817a 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -410,7 +410,6 @@
   const Operator* ToNumber();
   const Operator* ToObject();
   const Operator* ToString();
-  const Operator* Yield();
 
   const Operator* Create();
   const Operator* CreateArguments(CreateArgumentsType type);
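
The property upgrades in js-operator.cc above (kNoThrow to kPure for
StrictEqual/StrictNotEqual, kEliminatable to kPure for TypeOf and JSToBoolean)
widen what the optimizer may do with these operators. A toy bitmask in the
same spirit; the exact composition of V8's flags is an assumption here:

    enum Property : unsigned {
      kNoProperties = 0,
      kIdempotent   = 1u << 0,
      kNoRead       = 1u << 1,
      kNoWrite      = 1u << 2,
      kNoThrow      = 1u << 3,
      // Assumed composition: eliminatable operators may be removed when their
      // value is unused...
      kEliminatable = kNoWrite | kNoThrow,
      // ...while pure operators may additionally be hoisted, reordered, and
      // combined, since they neither read, write, nor throw.
      kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent,
    };

    inline bool HasProperty(unsigned properties, Property p) {
      return (properties & p) == p;
    }
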
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 7e1a0dc..8099533 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -27,7 +27,7 @@
   JSBinopReduction(JSTypedLowering* lowering, Node* node)
       : lowering_(lowering), node_(node) {}
 
-  void ConvertInputsToNumber(Node* frame_state) {
+  void ConvertInputsToNumberOrUndefined(Node* frame_state) {
     // To convert the inputs to numbers, we have to provide frame states
     // for lazy bailouts in the ToNumber conversions.
     // We use a little hack here: we take the frame state before the binary
@@ -46,11 +46,11 @@
       ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
     } else {
       left_input = left_is_primitive
-                       ? ConvertPlainPrimitiveToNumber(left())
+                       ? ConvertPlainPrimitiveToNumberOrUndefined(left())
                        : ConvertSingleInputToNumber(
                              left(), CreateFrameStateForLeftInput(frame_state));
       right_input = right_is_primitive
-                        ? ConvertPlainPrimitiveToNumber(right())
+                        ? ConvertPlainPrimitiveToNumberOrUndefined(right())
                         : ConvertSingleInputToNumber(
                               right(), CreateFrameStateForRightInput(
                                            frame_state, left_input));
@@ -107,32 +107,6 @@
     return lowering_->Changed(node_);
   }
 
-  Reduction ChangeToStringComparisonOperator(const Operator* op,
-                                             bool invert = false) {
-    if (node_->op()->ControlInputCount() > 0) {
-      lowering_->RelaxControls(node_);
-    }
-    // String comparison operators need effect and control inputs, so copy them
-    // over.
-    Node* effect = NodeProperties::GetEffectInput(node_);
-    Node* control = NodeProperties::GetControlInput(node_);
-    node_->ReplaceInput(2, effect);
-    node_->ReplaceInput(3, control);
-
-    node_->TrimInputCount(4);
-    NodeProperties::ChangeOp(node_, op);
-
-    if (invert) {
-      // Insert a boolean-not to invert the value.
-      Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
-      node_->ReplaceUses(value);
-      // Note: ReplaceUses() smashes all uses, so smash it back here.
-      value->ReplaceInput(0, node_);
-      return lowering_->Replace(value);
-    }
-    return lowering_->Changed(node_);
-  }
-
   Reduction ChangeToPureOperator(const Operator* op, Type* type) {
     return ChangeToPureOperator(op, false, type);
   }
@@ -242,12 +216,14 @@
         frame_state->InputAt(kFrameStateOuterStateInput));
   }
 
-  Node* ConvertPlainPrimitiveToNumber(Node* node) {
+  Node* ConvertPlainPrimitiveToNumberOrUndefined(Node* node) {
     DCHECK(NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
     // Avoid inserting too many eager ToNumber() operations.
     Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
     if (reduction.Changed()) return reduction.replacement();
-    // TODO(jarin) Use PlainPrimitiveToNumber once we have it.
+    if (NodeProperties::GetType(node)->Is(Type::NumberOrUndefined())) {
+      return node;
+    }
     return graph()->NewNode(
         javascript()->ToNumber(), node, jsgraph()->NoContextConstant(),
         jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
@@ -257,7 +233,9 @@
     DCHECK(!NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
     Node* const n = graph()->NewNode(javascript()->ToNumber(), node, context(),
                                      frame_state, effect(), control());
-    NodeProperties::ReplaceUses(node_, node_, node_, n, n);
+    Node* const if_success = graph()->NewNode(common()->IfSuccess(), n);
+    NodeProperties::ReplaceControlInput(node_, if_success);
+    NodeProperties::ReplaceUses(node_, node_, node_, node_, n);
     update_effect(n);
     return n;
   }
@@ -361,20 +339,27 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.BothInputsAre(Type::Number())) {
+  if (r.BothInputsAre(Type::NumberOrUndefined())) {
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
-    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+    return ReduceNumberBinop(node, simplified()->NumberAdd());
   }
   if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
     // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
     Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-    r.ConvertInputsToNumber(frame_state);
+    r.ConvertInputsToNumberOrUndefined(frame_state);
     return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
-  if (r.BothInputsAre(Type::String())) {
-    // JSAdd(x:string, y:string) => CallStub[StringAdd](x, y)
+  if (r.OneInputIs(Type::String())) {
+    StringAddFlags flags = STRING_ADD_CHECK_NONE;
+    if (!r.LeftInputIs(Type::String())) {
+      flags = STRING_ADD_CONVERT_LEFT;
+    } else if (!r.RightInputIs(Type::String())) {
+      flags = STRING_ADD_CONVERT_RIGHT;
+    }
+    // JSAdd(x:string, y) => CallStub[StringAdd](x, y)
+    // JSAdd(x, y:string) => CallStub[StringAdd](x, y)
     Callable const callable =
-        CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+        CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
     CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
         isolate(), graph()->zone(), callable.descriptor(), 0,
         CallDescriptor::kNeedsFrameState, node->op()->properties());
@@ -408,13 +393,13 @@
 
   JSBinopReduction r(this, node);
   if (numberOp == simplified()->NumberModulus()) {
-    if (r.BothInputsAre(Type::Number())) {
+    if (r.BothInputsAre(Type::NumberOrUndefined())) {
       return r.ChangeToPureOperator(numberOp, Type::Number());
     }
     return NoChange();
   }
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
+  r.ConvertInputsToNumberOrUndefined(frame_state);
   return r.ChangeToPureOperator(numberOp, Type::Number());
 }
 
@@ -424,7 +409,7 @@
 
   JSBinopReduction r(this, node);
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
+  r.ConvertInputsToNumberOrUndefined(frame_state);
   r.ConvertInputsToUI32(kSigned, kSigned);
   return r.ChangeToPureOperator(intOp, Type::Integral32());
 }
@@ -437,7 +422,7 @@
 
   JSBinopReduction r(this, node);
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-  r.ConvertInputsToNumber(frame_state);
+  r.ConvertInputsToNumberOrUndefined(frame_state);
   r.ConvertInputsToUI32(left_signedness, kUnsigned);
   return r.ChangeToPureOperator(shift_op);
 }
@@ -468,7 +453,7 @@
       default:
         return NoChange();
     }
-    r.ChangeToStringComparisonOperator(stringOp);
+    r.ChangeToPureOperator(stringOp);
     return Changed(node);
   }
   if (r.OneInputCannotBe(Type::StringOrReceiver())) {
@@ -483,7 +468,7 @@
     } else {
       // TODO(turbofan): mixed signed/unsigned int32 comparisons.
       Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
-      r.ConvertInputsToNumber(frame_state);
+      r.ConvertInputsToNumberOrUndefined(frame_state);
       less_than = simplified()->NumberLessThan();
       less_than_or_equal = simplified()->NumberLessThanOrEqual();
     }
@@ -512,18 +497,61 @@
   return NoChange();  // Keep a generic comparison.
 }
 
+Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
+  HeapObjectBinopMatcher m(node);
+  if (m.left().IsJSTypeOf() && m.right().HasValue() &&
+      m.right().Value()->IsString()) {
+    Node* replacement;
+    Node* input = m.left().InputAt(0);
+    Handle<String> value = Handle<String>::cast(m.right().Value());
+    if (String::Equals(value, factory()->boolean_string())) {
+      replacement = graph()->NewNode(
+          common()->Select(MachineRepresentation::kTagged),
+          graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
+                           jsgraph()->TrueConstant()),
+          jsgraph()->TrueConstant(),
+          graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
+                           jsgraph()->FalseConstant()));
+    } else if (String::Equals(value, factory()->function_string())) {
+      replacement = graph()->NewNode(simplified()->ObjectIsCallable(), input);
+    } else if (String::Equals(value, factory()->number_string())) {
+      replacement = graph()->NewNode(simplified()->ObjectIsNumber(), input);
+    } else if (String::Equals(value, factory()->string_string())) {
+      replacement = graph()->NewNode(simplified()->ObjectIsString(), input);
+    } else if (String::Equals(value, factory()->undefined_string())) {
+      replacement = graph()->NewNode(
+          common()->Select(MachineRepresentation::kTagged),
+          graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
+                           jsgraph()->NullConstant()),
+          jsgraph()->FalseConstant(),
+          graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+    } else {
+      return NoChange();
+    }
+    if (invert) {
+      replacement = graph()->NewNode(simplified()->BooleanNot(), replacement);
+    }
+    return Replace(replacement);
+  }
+  return NoChange();
+}
 
 Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
+  Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
+  if (reduction.Changed()) {
+    ReplaceWithValue(node, reduction.replacement());
+    return reduction;
+  }
+
   JSBinopReduction r(this, node);
 
   if (r.BothInputsAre(Type::Number())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
   }
   if (r.BothInputsAre(Type::String())) {
-    return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
-                                              invert);
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
   if (r.BothInputsAre(Type::Boolean())) {
     return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
@@ -573,6 +601,10 @@
       return Replace(replacement);
     }
   }
+  Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
+  if (reduction.Changed()) {
+    return reduction;
+  }
   if (r.OneInputIs(the_hole_type_)) {
     return r.ChangeToPureOperator(simplified()->ReferenceEqual(the_hole_type_),
                                   invert);
@@ -602,10 +634,9 @@
                                   invert);
   }
   if (r.BothInputsAre(Type::String())) {
-    return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
-                                              invert);
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
-  if (r.BothInputsAre(Type::Number())) {
+  if (r.BothInputsAre(Type::NumberOrUndefined())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
   }
   // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
@@ -616,10 +647,8 @@
 Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
   Node* const input = node->InputAt(0);
   Type* const input_type = NodeProperties::GetType(input);
-  Node* const effect = NodeProperties::GetEffectInput(node);
   if (input_type->Is(Type::Boolean())) {
     // JSToBoolean(x:boolean) => x
-    ReplaceWithValue(node, input, effect);
     return Replace(input);
   } else if (input_type->Is(Type::OrderedNumber())) {
     // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
@@ -633,11 +662,10 @@
     // JSToBoolean(x:string) => NumberLessThan(#0,x.length)
     FieldAccess const access = AccessBuilder::ForStringLength();
     Node* length = graph()->NewNode(simplified()->LoadField(access), input,
-                                    effect, graph()->start());
+                                    graph()->start(), graph()->start());
     ReplaceWithValue(node, node, length);
     node->ReplaceInput(0, jsgraph()->ZeroConstant());
     node->ReplaceInput(1, length);
-    node->TrimInputCount(2);
     NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
     return Changed(node);
   }
@@ -691,12 +719,6 @@
 }
 
 Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
-  if (input->opcode() == IrOpcode::kJSToNumber) {
-    // Recursively try to reduce the input first.
-    Reduction result = ReduceJSToNumber(input);
-    if (result.Changed()) return result;
-    return Changed(input);  // JSToNumber(JSToNumber(x)) => JSToNumber(x)
-  }
   // Check for ToNumber truncation of signaling NaN to undefined mapping.
   if (input->opcode() == IrOpcode::kSelect) {
     Node* check = NodeProperties::GetValueInput(input, 0);
@@ -914,27 +936,6 @@
     ReplaceWithValue(node, value, effect);
     return Replace(value);
   }
-  // Optimize "prototype" property of functions.
-  if (name.is_identical_to(factory()->prototype_string()) &&
-      receiver_type->IsConstant() &&
-      receiver_type->AsConstant()->Value()->IsJSFunction()) {
-    // TODO(turbofan): This lowering might not kick in if we ever lower
-    // the C++ accessor for "prototype" in an earlier optimization pass.
-    Handle<JSFunction> function =
-        Handle<JSFunction>::cast(receiver_type->AsConstant()->Value());
-    if (function->has_initial_map()) {
-      // We need to add a code dependency on the initial map of the {function}
-      // in order to be notified about changes to the "prototype" of {function},
-      // so it doesn't make sense to continue unless deoptimization is enabled.
-      if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-      Handle<Map> initial_map(function->initial_map(), isolate());
-      dependencies()->AssumeInitialMapCantChange(initial_map);
-      Node* value =
-          jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
-      ReplaceWithValue(node, value);
-      return Replace(value);
-    }
-  }
   return NoChange();
 }
 
@@ -1012,7 +1013,7 @@
         Node* effect = NodeProperties::GetEffectInput(node);
         Node* control = NodeProperties::GetControlInput(node);
         // Convert to a number first.
-        if (!value_type->Is(Type::Number())) {
+        if (!value_type->Is(Type::NumberOrUndefined())) {
           Reduction number_reduction = ReduceJSToNumberInput(value);
           if (number_reduction.Changed()) {
             value = number_reduction.replacement();
@@ -1065,10 +1066,7 @@
   Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
 
   // If deoptimization is disabled, we cannot optimize.
-  if (!(flags() & kDeoptimizationEnabled) ||
-      (flags() & kDisableBinaryOpReduction)) {
-    return NoChange();
-  }
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
 
   // If we are in a try block, don't optimize since the runtime call
   // in the proxy case can throw.
@@ -1087,15 +1085,21 @@
       Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
   Handle<SharedFunctionInfo> shared(function->shared(), isolate());
 
-  if (!function->IsConstructor() ||
-      function->map()->has_non_instance_prototype()) {
+  // Make sure the prototype of {function} is the %FunctionPrototype%, and it
+  // already has a meaningful initial map (i.e. we constructed at least one
+  // instance using the constructor {function}).
+  if (function->map()->prototype() != function->native_context()->closure() ||
+      function->map()->has_non_instance_prototype() ||
+      !function->has_initial_map()) {
     return NoChange();
   }
 
-  JSFunction::EnsureHasInitialMap(function);
-  DCHECK(function->has_initial_map());
+  // We can only use the fast case if @@hasInstance has not been used so far.
+  if (!isolate()->IsHasInstanceLookupChainIntact()) return NoChange();
+  dependencies()->AssumePropertyCell(factory()->has_instance_protector());
+
   Handle<Map> initial_map(function->initial_map(), isolate());
-  this->dependencies()->AssumeInitialMapCantChange(initial_map);
+  dependencies()->AssumeInitialMapCantChange(initial_map);
   Node* prototype =
       jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
 
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 1517871..8733e6c 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -59,6 +59,7 @@
   Reduction ReduceJSInstanceOf(Node* node);
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSStoreContext(Node* node);
+  Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
   Reduction ReduceJSEqual(Node* node, bool invert);
   Reduction ReduceJSStrictEqual(Node* node, bool invert);
   Reduction ReduceJSToBoolean(Node* node);
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 105bd35..5e217b0 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -88,7 +88,7 @@
 
 bool CallDescriptor::CanTailCall(const Node* node,
                                  int* stack_param_delta) const {
-  CallDescriptor const* other = OpParameter<CallDescriptor const*>(node);
+  CallDescriptor const* other = CallDescriptorOf(node->op());
   size_t current_input = 0;
   size_t other_input = 0;
   *stack_param_delta = 0;
@@ -112,19 +112,12 @@
     ++current_input;
     ++other_input;
   }
-  return HasSameReturnLocationsAs(OpParameter<CallDescriptor const*>(node));
+  return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
 }
 
 
 CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
   DCHECK(!info->IsStub());
-  if (info->has_literal()) {
-    // If we already have the function literal, use the number of parameters
-    // plus the receiver.
-    return GetJSCallDescriptor(zone, info->is_osr(),
-                               1 + info->literal()->parameter_count(),
-                               CallDescriptor::kNoFlags);
-  }
   if (!info->closure().is_null()) {
     // If we are compiling a JS function, use a JS call descriptor,
     // plus the receiver.
@@ -143,14 +136,19 @@
   // not to call into arbitrary JavaScript, not to throw, and not to deoptimize
   // are blacklisted here and can be called without a FrameState.
   switch (function) {
+    case Runtime::kAbort:
     case Runtime::kAllocateInTargetSpace:
     case Runtime::kCreateIterResultObject:
     case Runtime::kDefineDataPropertyInLiteral:
     case Runtime::kDefineGetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kDefineSetterPropertyUnchecked:  // TODO(jarin): Is it safe?
-    case Runtime::kFinalizeClassDefinition:        // TODO(conradw): Is it safe?
     case Runtime::kForInDone:
     case Runtime::kForInStep:
+    case Runtime::kGeneratorSetContext:
+    case Runtime::kGeneratorGetContinuation:
+    case Runtime::kGeneratorSetContinuation:
+    case Runtime::kGeneratorLoadRegister:
+    case Runtime::kGeneratorStoreRegister:
     case Runtime::kGetSuperConstructor:
     case Runtime::kIsFunction:
     case Runtime::kNewClosure:
@@ -166,6 +164,7 @@
     case Runtime::kStringLessThanOrEqual:
     case Runtime::kStringGreaterThan:
     case Runtime::kStringGreaterThanOrEqual:
+    case Runtime::kToFastProperties:  // TODO(conradw): Is it safe?
     case Runtime::kTraceEnter:
     case Runtime::kTraceExit:
       return 0;
@@ -411,6 +410,78 @@
       descriptor.DebugName(isolate));
 }
 
+// static
+CallDescriptor* Linkage::GetAllocateCallDescriptor(Zone* zone) {
+  LocationSignature::Builder locations(zone, 1, 1);
+  MachineSignature::Builder types(zone, 1, 1);
+
+  locations.AddParam(regloc(kAllocateSizeRegister));
+  types.AddParam(MachineType::Int32());
+
+  locations.AddReturn(regloc(kReturnRegister0));
+  types.AddReturn(MachineType::AnyTagged());
+
+  // The target for allocate calls is a code object.
+  MachineType target_type = MachineType::AnyTagged();
+  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  return new (zone) CallDescriptor(     // --
+      CallDescriptor::kCallCodeObject,  // kind
+      target_type,                      // target MachineType
+      target_loc,                       // target location
+      types.Build(),                    // machine_sig
+      locations.Build(),                // location_sig
+      0,                                // stack_parameter_count
+      Operator::kNoThrow,               // properties
+      kNoCalleeSaved,                   // callee-saved registers
+      kNoCalleeSaved,                   // callee-saved fp
+      CallDescriptor::kCanUseRoots,     // flags
+      "Allocate");
+}
+
+// static
+CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
+    Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+    int stack_parameter_count) {
+  const int register_parameter_count = descriptor.GetRegisterParameterCount();
+  const int parameter_count = register_parameter_count + stack_parameter_count;
+
+  LocationSignature::Builder locations(zone, 0, parameter_count);
+  MachineSignature::Builder types(zone, 0, parameter_count);
+
+  // Add parameters in registers and on the stack.
+  for (int i = 0; i < parameter_count; i++) {
+    if (i < register_parameter_count) {
+      // The first parameters go in registers.
+      Register reg = descriptor.GetRegisterParameter(i);
+      Representation rep =
+          RepresentationFromType(descriptor.GetParameterType(i));
+      locations.AddParam(regloc(reg));
+      types.AddParam(reptyp(rep));
+    } else {
+      // The rest of the parameters go on the stack.
+      int stack_slot = i - register_parameter_count - stack_parameter_count;
+      locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
+      types.AddParam(MachineType::AnyTagged());
+    }
+  }
+
+  // The target for interpreter dispatches is a code entry address.
+  MachineType target_type = MachineType::Pointer();
+  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+  return new (zone) CallDescriptor(            // --
+      CallDescriptor::kCallAddress,            // kind
+      target_type,                             // target MachineType
+      target_loc,                              // target location
+      types.Build(),                           // machine_sig
+      locations.Build(),                       // location_sig
+      stack_parameter_count,                   // stack_parameter_count
+      Operator::kNoProperties,                 // properties
+      kNoCalleeSaved,                          // callee-saved registers
+      kNoCalleeSaved,                          // callee-saved fp
+      CallDescriptor::kCanUseRoots |           // flags
+          CallDescriptor::kSupportsTailCalls,  // flags
+      descriptor.DebugName(isolate));
+}
 
 LinkageLocation Linkage::GetOsrValueLocation(int index) const {
   CHECK(incoming_->IsJSFunctionCall());
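
In GetBytecodeDispatchCallDescriptor above, parameters beyond the register
count land in caller frame slots with negative indices: for i in
[register_parameter_count, parameter_count) the computed slot runs from
-stack_parameter_count up to -1. A quick self-check of that arithmetic:

    #include <cassert>

    int main() {
      const int register_parameter_count = 3;
      const int stack_parameter_count = 4;
      const int parameter_count = register_parameter_count + stack_parameter_count;
      for (int i = register_parameter_count; i < parameter_count; i++) {
        // Same formula as in the hunk above.
        int stack_slot = i - register_parameter_count - stack_parameter_count;
        assert(stack_slot >= -stack_parameter_count && stack_slot <= -1);
      }
    }
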
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index a0434f8..958e8dc 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -152,20 +152,19 @@
   enum Flag {
     kNoFlags = 0u,
     kNeedsFrameState = 1u << 0,
-    kPatchableCallSite = 1u << 1,
-    kNeedsNopAfterCall = 1u << 2,
-    kHasExceptionHandler = 1u << 3,
-    kHasLocalCatchHandler = 1u << 4,
-    kSupportsTailCalls = 1u << 5,
-    kCanUseRoots = 1u << 6,
+    kHasExceptionHandler = 1u << 1,
+    kHasLocalCatchHandler = 1u << 2,
+    kSupportsTailCalls = 1u << 3,
+    kCanUseRoots = 1u << 4,
     // (arm64 only) native stack should be used for arguments.
-    kUseNativeStack = 1u << 7,
+    kUseNativeStack = 1u << 5,
     // (arm64 only) call instruction has to restore JSSP or CSP.
-    kRestoreJSSP = 1u << 8,
-    kRestoreCSP = 1u << 9,
+    kRestoreJSSP = 1u << 6,
+    kRestoreCSP = 1u << 7,
     // Causes the code generator to initialize the root register.
-    kInitializeRootRegister = 1u << 10,
-    kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
+    kInitializeRootRegister = 1u << 8,
+    // Never tries to allocate space on our heap.
+    kNoAllocate = 1u << 9
   };
   typedef base::Flags<Flag> Flags;
 
@@ -304,10 +303,11 @@
 // representing the architecture-specific location. The following call node
 // layouts are supported (where {n} is the number of value inputs):
 //
-//                  #0          #1     #2     #3     [...]             #n
-// Call[CodeStub]   code,       arg 1, arg 2, arg 3, [...],            context
-// Call[JSFunction] function,   rcvr,  arg 1, arg 2, [...], new, #arg, context
-// Call[Runtime]    CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
+//                        #0          #1     #2     [...]             #n
+// Call[CodeStub]         code,       arg 1, arg 2, [...],            context
+// Call[JSFunction]       function,   rcvr,  arg 1, [...], new, #arg, context
+// Call[Runtime]          CEntryStub, arg 1, arg 2, [...], fun, #arg, context
+// Call[BytecodeDispatch] address,    arg 1, arg 2, [...]
 class Linkage : public ZoneObject {
  public:
   explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
@@ -332,6 +332,11 @@
       MachineType return_type = MachineType::AnyTagged(),
       size_t return_count = 1);
 
+  static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
+  static CallDescriptor* GetBytecodeDispatchCallDescriptor(
+      Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+      int stack_parameter_count);
+
   // Creates a call descriptor for simplified C calls that is appropriate
   // for the host platform. This simplified calling convention only supports
   // integers and pointers of one word size each, i.e. no floating point,
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index e19368d..a451cfc 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -4,7 +4,6 @@
 
 #include "src/compiler/load-elimination.h"
 
-#include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
@@ -34,8 +33,9 @@
        effect = NodeProperties::GetEffectInput(effect)) {
     switch (effect->opcode()) {
       case IrOpcode::kLoadField: {
+        FieldAccess const effect_access = FieldAccessOf(effect->op());
         if (object == NodeProperties::GetValueInput(effect, 0) &&
-            access == FieldAccessOf(effect->op())) {
+            access == effect_access && effect_access.type->Is(access.type)) {
           Node* const value = effect;
           ReplaceWithValue(node, value);
           return Replace(value);
@@ -56,8 +56,8 @@
               return Replace(value);
             } else {
               Node* renamed = graph()->NewNode(
-                  common()->Guard(Type::Intersect(stored_value_type, load_type,
-                                                  graph()->zone())),
+                  simplified()->TypeGuard(Type::Intersect(
+                      stored_value_type, load_type, graph()->zone())),
                   value, NodeProperties::GetControlInput(node));
               ReplaceWithValue(node, renamed);
               return Replace(renamed);
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 92c6dd0..4a1323b 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -11,25 +11,26 @@
 namespace internal {
 namespace compiler {
 
-class CommonOperatorBuilder;
 class Graph;
+class SimplifiedOperatorBuilder;
 
 class LoadElimination final : public AdvancedReducer {
  public:
   explicit LoadElimination(Editor* editor, Graph* graph,
-                           CommonOperatorBuilder* common)
-      : AdvancedReducer(editor), graph_(graph), common_(common) {}
+                           SimplifiedOperatorBuilder* simplified)
+      : AdvancedReducer(editor), graph_(graph), simplified_(simplified) {}
   ~LoadElimination() final;
 
   Reduction Reduce(Node* node) final;
 
  private:
-  CommonOperatorBuilder* common() const { return common_; }
-  Graph* graph() { return graph_; }
+  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
+  Graph* graph() const { return graph_; }
 
   Reduction ReduceLoadField(Node* node);
-  Graph* graph_;
-  CommonOperatorBuilder* common_;
+
+  Graph* const graph_;
+  SimplifiedOperatorBuilder* const simplified_;
 };
 
 }  // namespace compiler
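
The tightened condition in ReduceLoadField above reuses an earlier LoadField
only when, besides matching object and access, the earlier access's field type
is a subtype of the later one's. A toy backward walk applying that rule:

    #include <vector>

    struct FieldLoad { int object_id; int offset; int type_id; };

    // Toy subtype relation; stands in for effect_access.type->Is(access.type).
    bool IsSubtype(int sub, int super) { return sub <= super; }

    // Walks earlier loads (most recent first) and returns the index of one
    // whose value can replace the new load, or -1 if none qualifies.
    int FindReusableLoad(const std::vector<FieldLoad>& earlier, const FieldLoad& load) {
      for (int i = static_cast<int>(earlier.size()) - 1; i >= 0; --i) {
        if (earlier[i].object_id == load.object_id &&
            earlier[i].offset == load.offset &&
            IsSubtype(earlier[i].type_id, load.type_id)) {
          return i;
        }
      }
      return -1;
    }
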
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 19ea062..4b50ffe 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -419,8 +419,12 @@
       if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
       break;
     }
-    case IrOpcode::kTruncateFloat64ToInt32:
-      return ReduceTruncateFloat64ToInt32(node);
+    case IrOpcode::kTruncateFloat64ToWord32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+      return NoChange();
+    }
     case IrOpcode::kTruncateInt64ToInt32: {
       Int64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
@@ -433,11 +437,18 @@
       if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
       break;
     }
+    case IrOpcode::kRoundFloat64ToInt32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
     case IrOpcode::kFloat64InsertLowWord32:
       return ReduceFloat64InsertLowWord32(node);
     case IrOpcode::kFloat64InsertHighWord32:
       return ReduceFloat64InsertHighWord32(node);
     case IrOpcode::kStore:
+    case IrOpcode::kCheckedStore:
       return ReduceStore(node);
     case IrOpcode::kFloat64Equal:
     case IrOpcode::kFloat64LessThan:
@@ -645,41 +656,20 @@
 }
 
 
-Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
-  Float64Matcher m(node->InputAt(0));
-  if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
-  if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
-  if (m.IsPhi()) {
-    Node* const phi = m.node();
-    DCHECK_EQ(MachineRepresentation::kFloat64, PhiRepresentationOf(phi->op()));
-    if (phi->OwnedBy(node)) {
-      // TruncateFloat64ToInt32[mode](Phi[Float64](x1,...,xn))
-      //   => Phi[Int32](TruncateFloat64ToInt32[mode](x1),
-      //                 ...,
-      //                 TruncateFloat64ToInt32[mode](xn))
-      const int value_input_count = phi->InputCount() - 1;
-      for (int i = 0; i < value_input_count; ++i) {
-        Node* input = graph()->NewNode(node->op(), phi->InputAt(i));
-        // TODO(bmeurer): Reschedule input for reduction once we have Revisit()
-        // instead of recursing into ReduceTruncateFloat64ToInt32() here.
-        Reduction reduction = ReduceTruncateFloat64ToInt32(input);
-        if (reduction.Changed()) input = reduction.replacement();
-        phi->ReplaceInput(i, input);
-      }
-      NodeProperties::ChangeOp(
-          phi,
-          common()->Phi(MachineRepresentation::kWord32, value_input_count));
-      return Replace(phi);
-    }
-  }
-  return NoChange();
-}
-
-
 Reduction MachineOperatorReducer::ReduceStore(Node* node) {
-  MachineRepresentation const rep =
-      StoreRepresentationOf(node->op()).representation();
-  Node* const value = node->InputAt(2);
+  NodeMatcher nm(node);
+  MachineRepresentation rep;
+  int value_input;
+  if (nm.IsCheckedStore()) {
+    rep = CheckedStoreRepresentationOf(node->op());
+    value_input = 3;
+  } else {
+    rep = StoreRepresentationOf(node->op()).representation();
+    value_input = 2;
+  }
+
+  Node* const value = node->InputAt(value_input);
+
   switch (value->opcode()) {
     case IrOpcode::kWord32And: {
       Uint32BinopMatcher m(value);
@@ -687,7 +677,7 @@
                                     (m.right().Value() & 0xff) == 0xff) ||
                                    (rep == MachineRepresentation::kWord16 &&
                                     (m.right().Value() & 0xffff) == 0xffff))) {
-        node->ReplaceInput(2, m.left().node());
+        node->ReplaceInput(value_input, m.left().node());
         return Changed(node);
       }
       break;
@@ -700,7 +690,7 @@
                                       m.right().IsInRange(1, 16)))) {
         Int32BinopMatcher mleft(m.left().node());
         if (mleft.right().Is(m.right().Value())) {
-          node->ReplaceInput(2, mleft.left().node());
+          node->ReplaceInput(value_input, mleft.left().node());
           return Changed(node);
         }
       }
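
The ReduceStore change above extends an existing simplification to kCheckedStore: a kWord8 or kWord16 store keeps only the low bits of its value, so a Word32And mask (or a matching Shl/Sar sign-extension pair) feeding the stored value is unobservable and can be dropped; the value_input indirection is needed because CheckedStore carries its value at input 3 rather than 2. A standalone illustration of why the mask is redundant (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    // A narrow store truncates the value to the slot width by itself.
    void StoreWord8(uint8_t* slot, uint32_t value) {
      *slot = static_cast<uint8_t>(value);
    }

    int main() {
      uint8_t a = 0, b = 0;
      uint32_t v = 0x12345678u;
      StoreWord8(&a, v & 0xffu);  // value masked before the store
      StoreWord8(&b, v);          // mask elided, as the reducer now does
      assert(a == b && a == 0x78);
      return 0;
    }
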
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 7f8ff1a..cddf13d 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -70,7 +70,6 @@
   Reduction ReduceUint32Div(Node* node);
   Reduction ReduceInt32Mod(Node* node);
   Reduction ReduceUint32Mod(Node* node);
-  Reduction ReduceTruncateFloat64ToInt32(Node* node);
   Reduction ReduceStore(Node* node);
   Reduction ReduceProjection(size_t index, Node* node);
   Reduction ReduceWord32Shifts(Node* node);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 6a506d2..0d229c7 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -12,40 +12,6 @@
 namespace internal {
 namespace compiler {
 
-std::ostream& operator<<(std::ostream& os, TruncationMode mode) {
-  switch (mode) {
-    case TruncationMode::kJavaScript:
-      return os << "JavaScript";
-    case TruncationMode::kRoundToZero:
-      return os << "RoundToZero";
-  }
-  UNREACHABLE();
-  return os;
-}
-
-
-TruncationMode TruncationModeOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, op->opcode());
-  return OpParameter<TruncationMode>(op);
-}
-
-
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
-  switch (kind) {
-    case kNoWriteBarrier:
-      return os << "NoWriteBarrier";
-    case kMapWriteBarrier:
-      return os << "MapWriteBarrier";
-    case kPointerWriteBarrier:
-      return os << "PointerWriteBarrier";
-    case kFullWriteBarrier:
-      return os << "FullWriteBarrier";
-  }
-  UNREACHABLE();
-  return os;
-}
-
-
 bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
   return lhs.representation() == rhs.representation() &&
          lhs.write_barrier_kind() == rhs.write_barrier_kind();
@@ -69,7 +35,8 @@
 
 
 LoadRepresentation LoadRepresentationOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kLoad, op->opcode());
+  DCHECK(IrOpcode::kLoad == op->opcode() ||
+         IrOpcode::kAtomicLoad == op->opcode());
   return OpParameter<LoadRepresentation>(op);
 }
 
@@ -96,6 +63,11 @@
   return OpParameter<MachineRepresentation>(op);
 }
 
+MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kAtomicStore, op->opcode());
+  return OpParameter<MachineRepresentation>(op);
+}
+
 #define PURE_OP_LIST(V)                                                       \
   V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
   V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
@@ -145,6 +117,8 @@
   V(Uint64Mod, Operator::kNoProperties, 2, 1, 1)                              \
   V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1)                         \
   V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
+  V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1)                    \
+  V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)                \
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
   V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                  \
@@ -156,6 +130,7 @@
   V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                    \
@@ -174,12 +149,14 @@
   V(Float32Abs, Operator::kNoProperties, 1, 0, 1)                             \
   V(Float32Add, Operator::kCommutative, 2, 0, 1)                              \
   V(Float32Sub, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Float32SubPreserveNan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Float32Mul, Operator::kCommutative, 2, 0, 1)                              \
   V(Float32Div, Operator::kNoProperties, 2, 0, 1)                             \
   V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1)                            \
   V(Float64Abs, Operator::kNoProperties, 1, 0, 1)                             \
   V(Float64Add, Operator::kCommutative, 2, 0, 1)                              \
   V(Float64Sub, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Float64SubPreserveNan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Float64Mul, Operator::kCommutative, 2, 0, 1)                              \
   V(Float64Div, Operator::kNoProperties, 2, 0, 1)                             \
   V(Float64Mod, Operator::kNoProperties, 2, 0, 1)                             \
@@ -202,7 +179,179 @@
   V(Int32PairMul, Operator::kNoProperties, 4, 0, 2)                           \
   V(Word32PairShl, Operator::kNoProperties, 3, 0, 2)                          \
   V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                          \
-  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)
+  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)                          \
+  V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1)                        \
+  V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                   \
+  V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1)                   \
+  V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1)               \
+  V(Float32x4Add, Operator::kCommutative, 2, 0, 1)                            \
+  V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1)                           \
+  V(Float32x4Mul, Operator::kCommutative, 2, 0, 1)                            \
+  V(Float32x4Div, Operator::kNoProperties, 2, 0, 1)                           \
+  V(Float32x4Min, Operator::kCommutative, 2, 0, 1)                            \
+  V(Float32x4Max, Operator::kCommutative, 2, 0, 1)                            \
+  V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1)                         \
+  V(Float32x4Equal, Operator::kCommutative, 2, 0, 1)                          \
+  V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1)                       \
+  V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1)                      \
+  V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)               \
+  V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
+  V(Float32x4Select, Operator::kNoProperties, 3, 0, 1)                        \
+  V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                       \
+  V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                       \
+  V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1)                   \
+  V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1)                  \
+  V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1)                          \
+  V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                     \
+  V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Int32x4Add, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Int32x4Mul, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int32x4Min, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int32x4Max, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)               \
+  V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int32x4Equal, Operator::kCommutative, 2, 0, 1)                            \
+  V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1)                         \
+  V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int32x4Select, Operator::kNoProperties, 3, 0, 1)                          \
+  V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                         \
+  V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                         \
+  V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                   \
+  V(Uint32x4Min, Operator::kCommutative, 2, 0, 1)                             \
+  V(Uint32x4Max, Operator::kCommutative, 2, 0, 1)                             \
+  V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                  \
+  V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1)                         \
+  V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
+  V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                        \
+  V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                        \
+  V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1)                        \
+  V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1)                          \
+  V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                     \
+  V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Int16x8Add, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                      \
+  V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int16x8Mul, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int16x8Min, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int16x8Max, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)               \
+  V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int16x8Equal, Operator::kCommutative, 2, 0, 1)                            \
+  V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1)                         \
+  V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int16x8Select, Operator::kNoProperties, 3, 0, 1)                          \
+  V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                         \
+  V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                        \
+  V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
+  V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint16x8Min, Operator::kCommutative, 2, 0, 1)                             \
+  V(Uint16x8Max, Operator::kCommutative, 2, 0, 1)                             \
+  V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1)                         \
+  V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
+  V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                        \
+  V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                       \
+  V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1)                        \
+  V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1)                         \
+  V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                     \
+  V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1)                             \
+  V(Int8x16Add, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                      \
+  V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int8x16Mul, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int8x16Min, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int8x16Max, Operator::kCommutative, 2, 0, 1)                              \
+  V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)               \
+  V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int8x16Equal, Operator::kCommutative, 2, 0, 1)                            \
+  V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1)                         \
+  V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
+  V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                     \
+  V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
+  V(Int8x16Select, Operator::kNoProperties, 3, 0, 1)                          \
+  V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                        \
+  V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                        \
+  V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                     \
+  V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint8x16Min, Operator::kCommutative, 2, 0, 1)                             \
+  V(Uint8x16Max, Operator::kCommutative, 2, 0, 1)                             \
+  V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)              \
+  V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)             \
+  V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1)                       \
+  V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                \
+  V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
+  V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1)                        \
+  V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                    \
+  V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                    \
+  V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)    \
+  V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1)                        \
+  V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                       \
+  V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                       \
+  V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1)                           \
+  V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1)                        \
+  V(Simd128Load, Operator::kNoProperties, 2, 0, 1)                            \
+  V(Simd128Load1, Operator::kNoProperties, 2, 0, 1)                           \
+  V(Simd128Load2, Operator::kNoProperties, 2, 0, 1)                           \
+  V(Simd128Load3, Operator::kNoProperties, 2, 0, 1)                           \
+  V(Simd128Store, Operator::kNoProperties, 3, 0, 1)                           \
+  V(Simd128Store1, Operator::kNoProperties, 3, 0, 1)                          \
+  V(Simd128Store2, Operator::kNoProperties, 3, 0, 1)                          \
+  V(Simd128Store3, Operator::kNoProperties, 3, 0, 1)                          \
+  V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)     \
+  V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
@@ -250,6 +399,19 @@
   V(kWord64)                           \
   V(kTagged)
 
+#define ATOMIC_TYPE_LIST(V) \
+  V(Int8)                   \
+  V(Uint8)                  \
+  V(Int16)                  \
+  V(Uint16)                 \
+  V(Int32)                  \
+  V(Uint32)
+
+#define ATOMIC_REPRESENTATION_LIST(V) \
+  V(kWord8)                           \
+  V(kWord16)                          \
+  V(kWord32)
+
 struct MachineOperatorGlobalCache {
 #define PURE(Name, properties, value_input_count, control_input_count,         \
              output_count)                                                     \
@@ -264,19 +426,6 @@
   PURE_OPTIONAL_OP_LIST(PURE)
 #undef PURE
 
-  template <TruncationMode kMode>
-  struct TruncateFloat64ToInt32Operator final
-      : public Operator1<TruncationMode> {
-    TruncateFloat64ToInt32Operator()
-        : Operator1<TruncationMode>(IrOpcode::kTruncateFloat64ToInt32,
-                                    Operator::kPure, "TruncateFloat64ToInt32",
-                                    1, 0, 0, 1, 0, 0, kMode) {}
-  };
-  TruncateFloat64ToInt32Operator<TruncationMode::kJavaScript>
-      kTruncateFloat64ToInt32JavaScript;
-  TruncateFloat64ToInt32Operator<TruncationMode::kRoundToZero>
-      kTruncateFloat64ToInt32RoundToZero;
-
 #define LOAD(Type)                                                             \
   struct Load##Type##Operator final : public Operator1<LoadRepresentation> {   \
     Load##Type##Operator()                                                     \
@@ -353,6 +502,30 @@
   CheckedStore##Type##Operator kCheckedStore##Type;
   MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
+
+#define ATOMIC_LOAD(Type)                                                     \
+  struct AtomicLoad##Type##Operator final                                     \
+      : public Operator1<LoadRepresentation> {                                \
+    AtomicLoad##Type##Operator()                                              \
+        : Operator1<LoadRepresentation>(                                      \
+              IrOpcode::kAtomicLoad, Operator::kNoThrow | Operator::kNoWrite, \
+              "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}         \
+  };                                                                          \
+  AtomicLoad##Type##Operator kAtomicLoad##Type;
+  ATOMIC_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
+#define ATOMIC_STORE(Type)                                                     \
+  struct AtomicStore##Type##Operator                                           \
+      : public Operator1<MachineRepresentation> {                              \
+    AtomicStore##Type##Operator()                                              \
+        : Operator1<MachineRepresentation>(                                    \
+              IrOpcode::kAtomicStore, Operator::kNoRead | Operator::kNoThrow,  \
+              "AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
+  };                                                                           \
+  AtomicStore##Type##Operator kAtomicStore##Type;
+  ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
 };
 
 
@@ -384,19 +557,6 @@
 #undef PURE
 
 
-const Operator* MachineOperatorBuilder::TruncateFloat64ToInt32(
-    TruncationMode mode) {
-  switch (mode) {
-    case TruncationMode::kJavaScript:
-      return &cache_.kTruncateFloat64ToInt32JavaScript;
-    case TruncationMode::kRoundToZero:
-      return &cache_.kTruncateFloat64ToInt32RoundToZero;
-  }
-  UNREACHABLE();
-  return nullptr;
-}
-
-
 const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
 #define LOAD(Type)                  \
   if (rep == MachineType::Type()) { \
@@ -487,6 +647,29 @@
 const Operator* MachineOperatorBuilder::Word64CtzPlaceholder() {
   return &cache_.kWord64Ctz;
 }
+
+const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
+#define LOAD(Type)                    \
+  if (rep == MachineType::Type()) {   \
+    return &cache_.kAtomicLoad##Type; \
+  }
+  ATOMIC_TYPE_LIST(LOAD)
+#undef LOAD
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
+#define STORE(kRep)                         \
+  if (rep == MachineRepresentation::kRep) { \
+    return &cache_.kAtomicStore##kRep;      \
+  }
+  ATOMIC_REPRESENTATION_LIST(STORE)
+#undef STORE
+  UNREACHABLE();
+  return nullptr;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
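
For orientation: MachineOperatorGlobalCache keeps one immutable, statically constructed operator object per parameterization (one AtomicLoad per MachineType, one AtomicStore per MachineRepresentation), and the builder methods above hand out pointers into that cache so identical operators compare equal by address. A minimal sketch of the pattern, with simplified stand-in types rather than the real V8 classes:

    #include <cassert>

    enum class Rep { kWord8, kWord16, kWord32 };

    struct Op { Rep rep; };  // stand-in for Operator1<MachineRepresentation>

    struct OpCache {
      Op kAtomicStoreWord8{Rep::kWord8};
      Op kAtomicStoreWord16{Rep::kWord16};
      Op kAtomicStoreWord32{Rep::kWord32};
    };

    const Op* AtomicStore(OpCache& cache, Rep rep) {
      switch (rep) {
        case Rep::kWord8:  return &cache.kAtomicStoreWord8;
        case Rep::kWord16: return &cache.kAtomicStoreWord16;
        case Rep::kWord32: return &cache.kAtomicStoreWord32;
      }
      return nullptr;
    }

    int main() {
      OpCache cache;
      // Same parameterization yields the same pointer, so graph reductions
      // can compare operators by identity instead of by value.
      assert(AtomicStore(cache, Rep::kWord8) == AtomicStore(cache, Rep::kWord8));
      return 0;
    }
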
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 68e393a..814f6c9 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -33,32 +33,6 @@
 };
 
 
-// Supported float64 to int32 truncation modes.
-enum class TruncationMode : uint8_t {
-  kJavaScript,  // ES6 section 7.1.5
-  kRoundToZero  // Round towards zero. Implementation defined for NaN and ovf.
-};
-
-V8_INLINE size_t hash_value(TruncationMode mode) {
-  return static_cast<uint8_t>(mode);
-}
-
-std::ostream& operator<<(std::ostream&, TruncationMode);
-
-TruncationMode TruncationModeOf(Operator const*);
-
-
-// Supported write barrier modes.
-enum WriteBarrierKind {
-  kNoWriteBarrier,
-  kMapWriteBarrier,
-  kPointerWriteBarrier,
-  kFullWriteBarrier
-};
-
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
-
-
 // A Load needs a MachineType.
 typedef MachineType LoadRepresentation;
 
@@ -104,6 +78,8 @@
 
 MachineRepresentation StackSlotRepresentationOf(Operator const* op);
 
+MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
+
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
 // for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -220,6 +196,12 @@
   const Operator* Uint64LessThanOrEqual();
   const Operator* Uint64Mod();
 
+  // This operator reinterprets the bits of a word as a tagged pointer.
+  const Operator* BitcastWordToTagged();
+
+  // JavaScript float64 to int32/uint32 truncation.
+  const Operator* TruncateFloat64ToWord32();
+
   // These operators change the representation of numbers while preserving the
   // value of the number. Narrowing operators assume the input is representable
   // in the target type and are *not* defined for other inputs.
@@ -243,8 +225,8 @@
   // These operators truncate or round numbers, both changing the representation
   // of the number and mapping multiple input values onto the same output value.
   const Operator* TruncateFloat64ToFloat32();
-  const Operator* TruncateFloat64ToInt32(TruncationMode);
   const Operator* TruncateInt64ToInt32();
+  const Operator* RoundFloat64ToInt32();
   const Operator* RoundInt32ToFloat32();
   const Operator* RoundInt64ToFloat32();
   const Operator* RoundInt64ToFloat64();
@@ -263,6 +245,7 @@
   // (single-precision).
   const Operator* Float32Add();
   const Operator* Float32Sub();
+  const Operator* Float32SubPreserveNan();
   const Operator* Float32Mul();
   const Operator* Float32Div();
   const Operator* Float32Sqrt();
@@ -271,6 +254,7 @@
   // (double-precision).
   const Operator* Float64Add();
   const Operator* Float64Sub();
+  const Operator* Float64SubPreserveNan();
   const Operator* Float64Mul();
   const Operator* Float64Div();
   const Operator* Float64Mod();
@@ -317,6 +301,190 @@
   const Operator* Float64InsertLowWord32();
   const Operator* Float64InsertHighWord32();
 
+  // SIMD operators.
+  const Operator* CreateFloat32x4();
+  const Operator* Float32x4ExtractLane();
+  const Operator* Float32x4ReplaceLane();
+  const Operator* Float32x4Abs();
+  const Operator* Float32x4Neg();
+  const Operator* Float32x4Sqrt();
+  const Operator* Float32x4RecipApprox();
+  const Operator* Float32x4RecipSqrtApprox();
+  const Operator* Float32x4Add();
+  const Operator* Float32x4Sub();
+  const Operator* Float32x4Mul();
+  const Operator* Float32x4Div();
+  const Operator* Float32x4Min();
+  const Operator* Float32x4Max();
+  const Operator* Float32x4MinNum();
+  const Operator* Float32x4MaxNum();
+  const Operator* Float32x4Equal();
+  const Operator* Float32x4NotEqual();
+  const Operator* Float32x4LessThan();
+  const Operator* Float32x4LessThanOrEqual();
+  const Operator* Float32x4GreaterThan();
+  const Operator* Float32x4GreaterThanOrEqual();
+  const Operator* Float32x4Select();
+  const Operator* Float32x4Swizzle();
+  const Operator* Float32x4Shuffle();
+  const Operator* Float32x4FromInt32x4();
+  const Operator* Float32x4FromUint32x4();
+
+  const Operator* CreateInt32x4();
+  const Operator* Int32x4ExtractLane();
+  const Operator* Int32x4ReplaceLane();
+  const Operator* Int32x4Neg();
+  const Operator* Int32x4Add();
+  const Operator* Int32x4Sub();
+  const Operator* Int32x4Mul();
+  const Operator* Int32x4Min();
+  const Operator* Int32x4Max();
+  const Operator* Int32x4ShiftLeftByScalar();
+  const Operator* Int32x4ShiftRightByScalar();
+  const Operator* Int32x4Equal();
+  const Operator* Int32x4NotEqual();
+  const Operator* Int32x4LessThan();
+  const Operator* Int32x4LessThanOrEqual();
+  const Operator* Int32x4GreaterThan();
+  const Operator* Int32x4GreaterThanOrEqual();
+  const Operator* Int32x4Select();
+  const Operator* Int32x4Swizzle();
+  const Operator* Int32x4Shuffle();
+  const Operator* Int32x4FromFloat32x4();
+
+  const Operator* Uint32x4Min();
+  const Operator* Uint32x4Max();
+  const Operator* Uint32x4ShiftLeftByScalar();
+  const Operator* Uint32x4ShiftRightByScalar();
+  const Operator* Uint32x4LessThan();
+  const Operator* Uint32x4LessThanOrEqual();
+  const Operator* Uint32x4GreaterThan();
+  const Operator* Uint32x4GreaterThanOrEqual();
+  const Operator* Uint32x4FromFloat32x4();
+
+  const Operator* CreateBool32x4();
+  const Operator* Bool32x4ExtractLane();
+  const Operator* Bool32x4ReplaceLane();
+  const Operator* Bool32x4And();
+  const Operator* Bool32x4Or();
+  const Operator* Bool32x4Xor();
+  const Operator* Bool32x4Not();
+  const Operator* Bool32x4AnyTrue();
+  const Operator* Bool32x4AllTrue();
+  const Operator* Bool32x4Swizzle();
+  const Operator* Bool32x4Shuffle();
+  const Operator* Bool32x4Equal();
+  const Operator* Bool32x4NotEqual();
+
+  const Operator* CreateInt16x8();
+  const Operator* Int16x8ExtractLane();
+  const Operator* Int16x8ReplaceLane();
+  const Operator* Int16x8Neg();
+  const Operator* Int16x8Add();
+  const Operator* Int16x8AddSaturate();
+  const Operator* Int16x8Sub();
+  const Operator* Int16x8SubSaturate();
+  const Operator* Int16x8Mul();
+  const Operator* Int16x8Min();
+  const Operator* Int16x8Max();
+  const Operator* Int16x8ShiftLeftByScalar();
+  const Operator* Int16x8ShiftRightByScalar();
+  const Operator* Int16x8Equal();
+  const Operator* Int16x8NotEqual();
+  const Operator* Int16x8LessThan();
+  const Operator* Int16x8LessThanOrEqual();
+  const Operator* Int16x8GreaterThan();
+  const Operator* Int16x8GreaterThanOrEqual();
+  const Operator* Int16x8Select();
+  const Operator* Int16x8Swizzle();
+  const Operator* Int16x8Shuffle();
+
+  const Operator* Uint16x8AddSaturate();
+  const Operator* Uint16x8SubSaturate();
+  const Operator* Uint16x8Min();
+  const Operator* Uint16x8Max();
+  const Operator* Uint16x8ShiftLeftByScalar();
+  const Operator* Uint16x8ShiftRightByScalar();
+  const Operator* Uint16x8LessThan();
+  const Operator* Uint16x8LessThanOrEqual();
+  const Operator* Uint16x8GreaterThan();
+  const Operator* Uint16x8GreaterThanOrEqual();
+
+  const Operator* CreateBool16x8();
+  const Operator* Bool16x8ExtractLane();
+  const Operator* Bool16x8ReplaceLane();
+  const Operator* Bool16x8And();
+  const Operator* Bool16x8Or();
+  const Operator* Bool16x8Xor();
+  const Operator* Bool16x8Not();
+  const Operator* Bool16x8AnyTrue();
+  const Operator* Bool16x8AllTrue();
+  const Operator* Bool16x8Swizzle();
+  const Operator* Bool16x8Shuffle();
+  const Operator* Bool16x8Equal();
+  const Operator* Bool16x8NotEqual();
+
+  const Operator* CreateInt8x16();
+  const Operator* Int8x16ExtractLane();
+  const Operator* Int8x16ReplaceLane();
+  const Operator* Int8x16Neg();
+  const Operator* Int8x16Add();
+  const Operator* Int8x16AddSaturate();
+  const Operator* Int8x16Sub();
+  const Operator* Int8x16SubSaturate();
+  const Operator* Int8x16Mul();
+  const Operator* Int8x16Min();
+  const Operator* Int8x16Max();
+  const Operator* Int8x16ShiftLeftByScalar();
+  const Operator* Int8x16ShiftRightByScalar();
+  const Operator* Int8x16Equal();
+  const Operator* Int8x16NotEqual();
+  const Operator* Int8x16LessThan();
+  const Operator* Int8x16LessThanOrEqual();
+  const Operator* Int8x16GreaterThan();
+  const Operator* Int8x16GreaterThanOrEqual();
+  const Operator* Int8x16Select();
+  const Operator* Int8x16Swizzle();
+  const Operator* Int8x16Shuffle();
+
+  const Operator* Uint8x16AddSaturate();
+  const Operator* Uint8x16SubSaturate();
+  const Operator* Uint8x16Min();
+  const Operator* Uint8x16Max();
+  const Operator* Uint8x16ShiftLeftByScalar();
+  const Operator* Uint8x16ShiftRightByScalar();
+  const Operator* Uint8x16LessThan();
+  const Operator* Uint8x16LessThanOrEqual();
+  const Operator* Uint8x16GreaterThan();
+  const Operator* Uint8x16GreaterThanOrEqual();
+
+  const Operator* CreateBool8x16();
+  const Operator* Bool8x16ExtractLane();
+  const Operator* Bool8x16ReplaceLane();
+  const Operator* Bool8x16And();
+  const Operator* Bool8x16Or();
+  const Operator* Bool8x16Xor();
+  const Operator* Bool8x16Not();
+  const Operator* Bool8x16AnyTrue();
+  const Operator* Bool8x16AllTrue();
+  const Operator* Bool8x16Swizzle();
+  const Operator* Bool8x16Shuffle();
+  const Operator* Bool8x16Equal();
+  const Operator* Bool8x16NotEqual();
+
+  const Operator* Simd128Load();
+  const Operator* Simd128Load1();
+  const Operator* Simd128Load2();
+  const Operator* Simd128Load3();
+  const Operator* Simd128Store();
+  const Operator* Simd128Store1();
+  const Operator* Simd128Store2();
+  const Operator* Simd128Store3();
+  const Operator* Simd128And();
+  const Operator* Simd128Or();
+  const Operator* Simd128Xor();
+  const Operator* Simd128Not();
+
   // load [base + index]
   const Operator* Load(LoadRepresentation rep);
 
@@ -335,6 +503,11 @@
   // checked-store heap, index, length, value
   const Operator* CheckedStore(CheckedStoreRepresentation);
 
+  // atomic-load [base + index]
+  const Operator* AtomicLoad(LoadRepresentation rep);
+  // atomic-store [base + index], value
+  const Operator* AtomicStore(MachineRepresentation rep);
+
   // Target machine word-size assumed by this builder.
   bool Is32() const { return word() == MachineRepresentation::kWord32; }
   bool Is64() const { return word() == MachineRepresentation::kWord64; }
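
The new AtomicLoad/AtomicStore operators carry only a representation; in plain C++ terms they correspond to an atomic load and store on a word-sized cell. The ordering below is an assumption for illustration (sequentially consistent, the std::atomic default); the diff itself does not specify a memory order:

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    int main() {
      std::atomic<uint32_t> cell{0};
      // Analogue of AtomicStore with MachineRepresentation::kWord32;
      // [base + index] addressing collapses to one cell in this toy model.
      cell.store(42u);
      // Analogue of AtomicLoad with MachineType::Uint32().
      uint32_t v = cell.load();
      std::cout << v << "\n";
      return 0;
    }
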
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
new file mode 100644
index 0000000..59fd899
--- /dev/null
+++ b/src/compiler/memory-optimizer.cc
@@ -0,0 +1,494 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/memory-optimizer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
+    : jsgraph_(jsgraph),
+      empty_state_(AllocationState::Empty(zone)),
+      pending_(zone),
+      tokens_(zone),
+      zone_(zone) {}
+
+void MemoryOptimizer::Optimize() {
+  EnqueueUses(graph()->start(), empty_state());
+  while (!tokens_.empty()) {
+    Token const token = tokens_.front();
+    tokens_.pop();
+    VisitNode(token.node, token.state);
+  }
+  DCHECK(pending_.empty());
+  DCHECK(tokens_.empty());
+}
+
+MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
+                                                  PretenureFlag pretenure,
+                                                  Zone* zone)
+    : node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
+  node_ids_.insert(node->id());
+}
+
+MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
+                                                  PretenureFlag pretenure,
+                                                  Node* size, Zone* zone)
+    : node_ids_(zone), pretenure_(pretenure), size_(size) {
+  node_ids_.insert(node->id());
+}
+
+void MemoryOptimizer::AllocationGroup::Add(Node* node) {
+  node_ids_.insert(node->id());
+}
+
+bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
+  return node_ids_.find(node->id()) != node_ids_.end();
+}
+
+MemoryOptimizer::AllocationState::AllocationState()
+    : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
+
+MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
+    : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
+
+MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
+                                                  int size, Node* top)
+    : group_(group), size_(size), top_(top) {}
+
+bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
+  return group() && group()->IsNewSpaceAllocation();
+}
+
+void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
+  DCHECK(!node->IsDead());
+  DCHECK_LT(0, node->op()->EffectInputCount());
+  switch (node->opcode()) {
+    case IrOpcode::kAllocate:
+      return VisitAllocate(node, state);
+    case IrOpcode::kCall:
+      return VisitCall(node, state);
+    case IrOpcode::kLoadElement:
+      return VisitLoadElement(node, state);
+    case IrOpcode::kLoadField:
+      return VisitLoadField(node, state);
+    case IrOpcode::kStoreElement:
+      return VisitStoreElement(node, state);
+    case IrOpcode::kStoreField:
+      return VisitStoreField(node, state);
+    case IrOpcode::kCheckedLoad:
+    case IrOpcode::kCheckedStore:
+    case IrOpcode::kIfException:
+    case IrOpcode::kLoad:
+    case IrOpcode::kStore:
+      return VisitOtherEffect(node, state);
+    default:
+      break;
+  }
+  DCHECK_EQ(0, node->op()->EffectOutputCount());
+}
+
+void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
+  Node* value;
+  Node* size = node->InputAt(0);
+  Node* effect = node->InputAt(1);
+  Node* control = node->InputAt(2);
+  PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+
+  // Determine the top/limit addresses.
+  Node* top_address = jsgraph()->ExternalConstant(
+      pretenure == NOT_TENURED
+          ? ExternalReference::new_space_allocation_top_address(isolate())
+          : ExternalReference::old_space_allocation_top_address(isolate()));
+  Node* limit_address = jsgraph()->ExternalConstant(
+      pretenure == NOT_TENURED
+          ? ExternalReference::new_space_allocation_limit_address(isolate())
+          : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+  // Check if we can fold this allocation into a previous allocation represented
+  // by the incoming {state}.
+  Int32Matcher m(size);
+  if (m.HasValue() && m.Value() < Page::kMaxRegularHeapObjectSize) {
+    int32_t const object_size = m.Value();
+    if (state->size() <= Page::kMaxRegularHeapObjectSize - object_size &&
+        state->group()->pretenure() == pretenure) {
+      // We can fold this Allocate {node} into the allocation {group}
+      // represented by the given {state}. Compute the upper bound for
+      // the new {state}.
+      int32_t const state_size = state->size() + object_size;
+
+      // Update the reservation check to the actual maximum upper bound.
+      AllocationGroup* const group = state->group();
+      if (OpParameter<int32_t>(group->size()) < state_size) {
+        NodeProperties::ChangeOp(group->size(),
+                                 common()->Int32Constant(state_size));
+      }
+
+      // Update the allocation top with the new object allocation.
+      // TODO(bmeurer): Defer writing back top as much as possible.
+      Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
+                                   jsgraph()->IntPtrConstant(object_size));
+      effect = graph()->NewNode(
+          machine()->Store(StoreRepresentation(
+              MachineType::PointerRepresentation(), kNoWriteBarrier)),
+          top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+
+      // Compute the effective inner allocated address.
+      value = graph()->NewNode(
+          machine()->BitcastWordToTagged(),
+          graph()->NewNode(machine()->IntAdd(), state->top(),
+                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+
+      // Extend the allocation {group}.
+      group->Add(value);
+      state = AllocationState::Open(group, state_size, top, zone());
+    } else {
+      // Set up a mutable reservation-size node; it will be patched as we
+      // fold additional allocations into this new group.
+      Node* size = graph()->NewNode(common()->Int32Constant(object_size));
+
+      // Load allocation top and limit.
+      Node* top = effect =
+          graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
+                           jsgraph()->IntPtrConstant(0), effect, control);
+      Node* limit = effect = graph()->NewNode(
+          machine()->Load(MachineType::Pointer()), limit_address,
+          jsgraph()->IntPtrConstant(0), effect, control);
+
+      // Check if we need to collect garbage before we can start bump-pointer
+      // allocation (always done for folded allocations).
+      Node* check = graph()->NewNode(
+          machine()->UintLessThan(),
+          graph()->NewNode(
+              machine()->IntAdd(), top,
+              machine()->Is64()
+                  ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
+                  : size),
+          limit);
+      Node* branch =
+          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+      Node* etrue = effect;
+      Node* vtrue = top;
+
+      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      Node* efalse = effect;
+      Node* vfalse;
+      {
+        Node* target = pretenure == NOT_TENURED
+                           ? jsgraph()->AllocateInNewSpaceStubConstant()
+                           : jsgraph()->AllocateInOldSpaceStubConstant();
+        if (!allocate_operator_.is_set()) {
+          CallDescriptor* descriptor =
+              Linkage::GetAllocateCallDescriptor(graph()->zone());
+          allocate_operator_.set(common()->Call(descriptor));
+        }
+        vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
+                                           size, efalse, if_false);
+        vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
+                                  jsgraph()->IntPtrConstant(kHeapObjectTag));
+      }
+
+      control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+      effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+      value = graph()->NewNode(
+          common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
+          control);
+
+      // Compute the new top and write it back.
+      top = graph()->NewNode(machine()->IntAdd(), value,
+                             jsgraph()->IntPtrConstant(object_size));
+      effect = graph()->NewNode(
+          machine()->Store(StoreRepresentation(
+              MachineType::PointerRepresentation(), kNoWriteBarrier)),
+          top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+
+      // Compute the initial object address.
+      value = graph()->NewNode(
+          machine()->BitcastWordToTagged(),
+          graph()->NewNode(machine()->IntAdd(), value,
+                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+
+      // Start a new allocation group.
+      AllocationGroup* group =
+          new (zone()) AllocationGroup(value, pretenure, size, zone());
+      state = AllocationState::Open(group, object_size, top, zone());
+    }
+  } else {
+    // Load allocation top and limit.
+    Node* top = effect =
+        graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
+                         jsgraph()->IntPtrConstant(0), effect, control);
+    Node* limit = effect =
+        graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
+                         jsgraph()->IntPtrConstant(0), effect, control);
+
+    // Compute the new top.
+    Node* new_top = graph()->NewNode(
+        machine()->IntAdd(), top,
+        machine()->Is64()
+            ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
+            : size);
+
+    // Check if we can do bump-pointer allocation here.
+    Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
+    Node* branch =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* etrue = effect;
+    Node* vtrue;
+    {
+      etrue = graph()->NewNode(
+          machine()->Store(StoreRepresentation(
+              MachineType::PointerRepresentation(), kNoWriteBarrier)),
+          top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
+      vtrue = graph()->NewNode(
+          machine()->BitcastWordToTagged(),
+          graph()->NewNode(machine()->IntAdd(), top,
+                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+    }
+
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+    Node* efalse = effect;
+    Node* vfalse;
+    {
+      Node* target = pretenure == NOT_TENURED
+                         ? jsgraph()->AllocateInNewSpaceStubConstant()
+                         : jsgraph()->AllocateInOldSpaceStubConstant();
+      if (!allocate_operator_.is_set()) {
+        CallDescriptor* descriptor =
+            Linkage::GetAllocateCallDescriptor(graph()->zone());
+        allocate_operator_.set(common()->Call(descriptor));
+      }
+      vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
+                                         efalse, if_false);
+    }
+
+    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+    value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                             vtrue, vfalse, control);
+
+    // Create an unfoldable allocation group.
+    AllocationGroup* group =
+        new (zone()) AllocationGroup(value, pretenure, zone());
+    state = AllocationState::Closed(group, zone());
+  }
+
+  // Replace all effect uses of {node} with the {effect}, enqueue the
+  // effect uses for further processing, and replace all value uses of
+  // {node} with the {value}.
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) {
+      EnqueueUse(edge.from(), edge.index(), state);
+      edge.UpdateTo(effect);
+    } else {
+      DCHECK(NodeProperties::IsValueEdge(edge));
+      edge.UpdateTo(value);
+    }
+  }
+
+  // Kill the {node} to make sure we don't leave dangling dead uses.
+  node->Kill();
+}
+
+void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kCall, node->opcode());
+  // If the call can allocate, we start with a fresh state.
+  if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
+    state = empty_state();
+  }
+  EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitLoadElement(Node* node,
+                                       AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
+  ElementAccess const& access = ElementAccessOf(node->op());
+  Node* index = node->InputAt(1);
+  node->ReplaceInput(1, ComputeIndex(access, index));
+  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+  EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
+  FieldAccess const& access = FieldAccessOf(node->op());
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+  node->InsertInput(graph()->zone(), 1, offset);
+  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+  EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreElement(Node* node,
+                                        AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
+  ElementAccess const& access = ElementAccessOf(node->op());
+  Node* object = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  WriteBarrierKind write_barrier_kind =
+      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+  node->ReplaceInput(1, ComputeIndex(access, index));
+  NodeProperties::ChangeOp(
+      node, machine()->Store(StoreRepresentation(
+                access.machine_type.representation(), write_barrier_kind)));
+  EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreField(Node* node,
+                                      AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
+  FieldAccess const& access = FieldAccessOf(node->op());
+  Node* object = node->InputAt(0);
+  WriteBarrierKind write_barrier_kind =
+      ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+  node->InsertInput(graph()->zone(), 1, offset);
+  NodeProperties::ChangeOp(
+      node, machine()->Store(StoreRepresentation(
+                access.machine_type.representation(), write_barrier_kind)));
+  EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitOtherEffect(Node* node,
+                                       AllocationState const* state) {
+  EnqueueUses(node, state);
+}
+
+Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
+  Node* index = key;
+  int element_size_shift =
+      ElementSizeLog2Of(access.machine_type.representation());
+  if (element_size_shift) {
+    index = graph()->NewNode(machine()->Word32Shl(), index,
+                             jsgraph()->Int32Constant(element_size_shift));
+  }
+  const int fixed_offset = access.header_size - access.tag();
+  if (fixed_offset) {
+    index = graph()->NewNode(machine()->Int32Add(), index,
+                             jsgraph()->Int32Constant(fixed_offset));
+  }
+  if (machine()->Is64()) {
+    // TODO(turbofan): This is probably only correct for typed arrays, and only
+    // if the typed arrays are at most 2GiB in size, which happens to match
+    // exactly our current situation.
+    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+  }
+  return index;
+}
+
+WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
+    Node* object, AllocationState const* state,
+    WriteBarrierKind write_barrier_kind) {
+  if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
+    write_barrier_kind = kNoWriteBarrier;
+  }
+  return write_barrier_kind;
+}
+
+MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
+    AllocationStates const& states) {
+  // Check if all states are the same; or at least if all allocation
+  // states belong to the same allocation group.
+  AllocationState const* state = states.front();
+  AllocationGroup* group = state->group();
+  for (size_t i = 1; i < states.size(); ++i) {
+    if (states[i] != state) state = nullptr;
+    if (states[i]->group() != group) group = nullptr;
+  }
+  if (state == nullptr) {
+    if (group != nullptr) {
+      // We cannot fold any more allocations into this group, but we can still
+      // eliminate write barriers on stores to this group.
+      // TODO(bmeurer): We could potentially just create a Phi here to merge
+      // the various tops; but we need to pay special attention not to create
+      // an unschedulable graph.
+      state = AllocationState::Closed(group, zone());
+    } else {
+      // The states are from different allocation groups.
+      state = empty_state();
+    }
+  }
+  return state;
+}
+
+void MemoryOptimizer::EnqueueMerge(Node* node, int index,
+                                   AllocationState const* state) {
+  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+  int const input_count = node->InputCount() - 1;
+  DCHECK_LT(0, input_count);
+  Node* const control = node->InputAt(input_count);
+  if (control->opcode() == IrOpcode::kLoop) {
+    // For loops we always start with an empty state at the beginning.
+    if (index == 0) EnqueueUses(node, empty_state());
+  } else {
+    DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+    // Check if we already know about this pending merge.
+    NodeId const id = node->id();
+    auto it = pending_.find(id);
+    if (it == pending_.end()) {
+      // Insert a new pending merge.
+      it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
+    }
+    // Add the next input state.
+    it->second.push_back(state);
+    // Check if states for all inputs are available by now.
+    if (it->second.size() == static_cast<size_t>(input_count)) {
+      // All inputs to this effect merge are done; merge the states given
+      // all input constraints, drop the pending merge, and enqueue uses of
+      // the EffectPhi {node}.
+      state = MergeStates(it->second);
+      EnqueueUses(node, state);
+      pending_.erase(it);
+    }
+  }
+}
+
+void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
+  for (Edge const edge : node->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) {
+      EnqueueUse(edge.from(), edge.index(), state);
+    }
+  }
+}
+
+void MemoryOptimizer::EnqueueUse(Node* node, int index,
+                                 AllocationState const* state) {
+  if (node->opcode() == IrOpcode::kEffectPhi) {
+    // An EffectPhi represents a merge of different effect chains, which
+    // needs special handling depending on whether the merge is part of a
+    // loop or just a normal control join.
+    EnqueueMerge(node, index, state);
+  } else {
+    Token token = {node, state};
+    tokens_.push(token);
+  }
+}
+
+Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
+
+Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* MemoryOptimizer::common() const {
+  return jsgraph()->common();
+}
+
+MachineOperatorBuilder* MemoryOptimizer::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
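
VisitAllocate above folds a constant-size allocation into the current open AllocationState whenever the combined size still fits a regular heap object and the pretenuring matches: the group's reservation-size constant is patched upward and the new object is carved out by bumping top, so only the first allocation in the group pays the limit check. A standalone model of that folding (plain C++, none of the V8 graph machinery):

    #include <cassert>
    #include <cstdint>

    struct Space {
      uintptr_t top;
      uintptr_t limit;
    };

    // One reservation check covers the combined size of the folded group.
    uintptr_t Reserve(Space& s, int total_size) {
      assert(s.top + total_size <= s.limit);
      uintptr_t base = s.top;
      s.top += total_size;
      return base;
    }

    int main() {
      alignas(8) static char arena[1024];
      Space s{reinterpret_cast<uintptr_t>(arena),
              reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
      // Fold a 16-byte and a 32-byte allocation into one 48-byte reservation.
      uintptr_t first = Reserve(s, 16 + 32);
      uintptr_t second = first + 16;  // inner address, no second limit check
      assert(second + 32 == s.top);
      return 0;
    }
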
diff --git a/src/compiler/memory-optimizer.h b/src/compiler/memory-optimizer.h
new file mode 100644
index 0000000..f0cd546
--- /dev/null
+++ b/src/compiler/memory-optimizer.h
@@ -0,0 +1,149 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
+#define V8_COMPILER_MEMORY_OPTIMIZER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ElementAccess;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+class Operator;
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef uint32_t NodeId;
+
+// Lowers all simplified memory-access and allocation-related nodes (e.g.
+// Allocate, LoadField, StoreField and friends) to machine operators.
+// Performs allocation folding and store write-barrier elimination
+// implicitly.
+class MemoryOptimizer final {
+ public:
+  MemoryOptimizer(JSGraph* jsgraph, Zone* zone);
+  ~MemoryOptimizer() {}
+
+  void Optimize();
+
+ private:
+  // An allocation group represents a set of allocations that have been folded
+  // together.
+  class AllocationGroup final : public ZoneObject {
+   public:
+    AllocationGroup(Node* node, PretenureFlag pretenure, Zone* zone);
+    AllocationGroup(Node* node, PretenureFlag pretenure, Node* size,
+                    Zone* zone);
+    ~AllocationGroup() {}
+
+    void Add(Node* object);
+    bool Contains(Node* object) const;
+    bool IsNewSpaceAllocation() const { return pretenure() == NOT_TENURED; }
+
+    PretenureFlag pretenure() const { return pretenure_; }
+    Node* size() const { return size_; }
+
+   private:
+    ZoneSet<NodeId> node_ids_;
+    PretenureFlag const pretenure_;
+    Node* const size_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
+  };
+
+  // An allocation state is propagated on the effect paths through the graph.
+  class AllocationState final : public ZoneObject {
+   public:
+    static AllocationState const* Empty(Zone* zone) {
+      return new (zone) AllocationState();
+    }
+    static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
+      return new (zone) AllocationState(group);
+    }
+    static AllocationState const* Open(AllocationGroup* group, int size,
+                                       Node* top, Zone* zone) {
+      return new (zone) AllocationState(group, size, top);
+    }
+
+    bool IsNewSpaceAllocation() const;
+
+    AllocationGroup* group() const { return group_; }
+    Node* top() const { return top_; }
+    int size() const { return size_; }
+
+   private:
+    AllocationState();
+    explicit AllocationState(AllocationGroup* group);
+    AllocationState(AllocationGroup* group, int size, Node* top);
+
+    AllocationGroup* const group_;
+    // The upper bound of the combined allocated object size on the current
+    // path (max int if allocation folding is impossible on this path).
+    int const size_;
+    Node* const top_;
+
+    DISALLOW_COPY_AND_ASSIGN(AllocationState);
+  };
+
+  // An array of allocation states used to collect states on merges.
+  typedef ZoneVector<AllocationState const*> AllocationStates;
+
+  // We thread through tokens to represent the current state on a given effect
+  // path through the graph.
+  struct Token {
+    Node* node;
+    AllocationState const* state;
+  };
+
+  void VisitNode(Node*, AllocationState const*);
+  void VisitAllocate(Node*, AllocationState const*);
+  void VisitCall(Node*, AllocationState const*);
+  void VisitLoadElement(Node*, AllocationState const*);
+  void VisitLoadField(Node*, AllocationState const*);
+  void VisitStoreElement(Node*, AllocationState const*);
+  void VisitStoreField(Node*, AllocationState const*);
+  void VisitOtherEffect(Node*, AllocationState const*);
+
+  Node* ComputeIndex(ElementAccess const&, Node*);
+  WriteBarrierKind ComputeWriteBarrierKind(Node* object,
+                                           AllocationState const* state,
+                                           WriteBarrierKind);
+
+  AllocationState const* MergeStates(AllocationStates const& states);
+
+  void EnqueueMerge(Node*, int, AllocationState const*);
+  void EnqueueUses(Node*, AllocationState const*);
+  void EnqueueUse(Node*, int, AllocationState const*);
+
+  AllocationState const* empty_state() const { return empty_state_; }
+  Graph* graph() const;
+  Isolate* isolate() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  CommonOperatorBuilder* common() const;
+  MachineOperatorBuilder* machine() const;
+  Zone* zone() const { return zone_; }
+
+  SetOncePointer<const Operator> allocate_operator_;
+  JSGraph* const jsgraph_;
+  AllocationState const* const empty_state_;
+  ZoneMap<NodeId, AllocationStates> pending_;
+  ZoneQueue<Token> tokens_;
+  Zone* const zone_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MEMORY_OPTIMIZER_H_
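
What the header sets up pays off in write-barrier elimination: a store into an object whose AllocationGroup is known to sit in new space on every path reaching the store cannot create an old-to-new pointer, so the record-write barrier can be dropped. A hedged sketch of that decision with stand-in types (the real ComputeWriteBarrierKind works on Nodes and also distinguishes open from closed states):

#include <set>

enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };

struct AllocationGroup {
  bool is_new_space = true;  // pretenure() == NOT_TENURED
  std::set<int> node_ids;    // ids of the allocations folded together
  bool Contains(int id) const { return node_ids.count(id) != 0; }
};

struct AllocationState {
  const AllocationGroup* group = nullptr;  // null on unknown paths
};

// A store into an object known to live in new space on every path reaching
// this point cannot create an old-to-new pointer, so the barrier can be
// elided; otherwise keep whatever the store requested.
WriteBarrierKind ComputeWriteBarrierKind(int object_id,
                                         const AllocationState& state,
                                         WriteBarrierKind requested) {
  if (state.group != nullptr && state.group->is_new_space &&
      state.group->Contains(object_id)) {
    return kNoWriteBarrier;
  }
  return requested;
}
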
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 9b0d706..c437d5e 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -119,7 +119,7 @@
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
   }
 
@@ -472,13 +472,24 @@
     __ bind(&done);                                                           \
   }
 
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
+  do {                                                   \
+    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+    __ sync();                                           \
+  } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)         \
+  do {                                                   \
+    __ sync();                                           \
+    __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
+    __ sync();                                           \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(sp, fp);
   __ Pop(ra, fp);
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {}
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -527,7 +538,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   MipsOperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -564,6 +576,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!instr->InputAt(0)->IsImmediate());
+      __ Jump(i.InputRegister(0));
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -641,7 +661,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -839,6 +861,36 @@
         __ sra(i.OutputRegister(), i.InputRegister(0), imm);
       }
       break;
+    case kMipsShlPair: {
+      if (instr->InputAt(2)->IsRegister()) {
+        __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputRegister(2));
+      } else {
+        uint32_t imm = i.InputOperand(2).immediate();
+        __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), imm);
+      }
+    } break;
+    case kMipsShrPair: {
+      if (instr->InputAt(2)->IsRegister()) {
+        __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputRegister(2));
+      } else {
+        uint32_t imm = i.InputOperand(2).immediate();
+        __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), imm);
+      }
+    } break;
+    case kMipsSarPair: {
+      if (instr->InputAt(2)->IsRegister()) {
+        __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputRegister(2));
+      } else {
+        uint32_t imm = i.InputOperand(2).immediate();
+        __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), imm);
+      }
+    } break;
     case kMipsExt:
       __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
@@ -869,7 +921,11 @@
         __ li(i.OutputRegister(), i.InputOperand(0));
       }
       break;
-
+    case kMipsLsa:
+      DCHECK(instr->InputAt(2)->IsImmediate());
+      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputInt8(2));
+      break;
     case kMipsCmpS:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
       break;
@@ -923,6 +979,14 @@
     case kMipsCmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
       break;
+    case kMipsAddPair:
+      __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                 i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+      break;
+    case kMipsSubPair:
+      __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                 i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+      break;
     case kMipsMulPair: {
       __ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
               i.InputRegister(2));
@@ -1212,7 +1276,7 @@
       __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
       break;
     case kMipsPush:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
         __ Subu(sp, sp, Operand(kDoubleSize));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1227,7 +1291,7 @@
       break;
     }
     case kMipsStoreToStackSlot: {
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
       } else {
         __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1274,7 +1338,32 @@
     case kCheckedStoreWord64:
       UNREACHABLE();  // currently unsupported checked int64 load/store.
       break;
+    case kAtomicLoadInt8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
+      break;
+    case kAtomicLoadUint8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
+      break;
+    case kAtomicLoadInt16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
+      break;
+    case kAtomicLoadUint16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
+      break;
+    case kAtomicLoadWord32:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
+      break;
+    case kAtomicStoreWord8:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
+      break;
+    case kAtomicStoreWord16:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
+      break;
+    case kAtomicStoreWord32:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
+      break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
 
 
@@ -1569,18 +1658,40 @@
   });
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
 
-
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::FinishFrame(Frame* frame) {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+  if (saves_fpu != 0) {
+    frame->AlignSavedCalleeRegisterSlots();
+  }
+
+  if (saves_fpu != 0) {
+    int count = base::bits::CountPopulation32(saves_fpu);
+    DCHECK(kNumCalleeSavedFPU == count);
+    frame->AllocateSavedCalleeRegisterSlots(count *
+                                            (kDoubleSize / kPointerSize));
+  }
+
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    int count = base::bits::CountPopulation32(saves);
+    DCHECK(kNumCalleeSaved == count + 1);
+    frame->AllocateSavedCalleeRegisterSlots(count);
+  }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsCFunctionCall()) {
       __ Push(ra, fp);
@@ -1592,6 +1703,8 @@
     }
   }
 
+  int shrink_slots = frame()->GetSpillSlotCount();
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1602,35 +1715,24 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
   const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
-  if (saves_fpu != 0) {
-    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
-  }
-  if (stack_shrink_slots > 0) {
-    __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+  if (shrink_slots > 0) {
+    __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
   }
 
   // Save callee-saved FPU registers.
   if (saves_fpu != 0) {
     __ MultiPushFPU(saves_fpu);
-    int count = base::bits::CountPopulation32(saves_fpu);
-    DCHECK(kNumCalleeSavedFPU == count);
-    frame()->AllocateSavedCalleeRegisterSlots(count *
-                                              (kDoubleSize / kPointerSize));
   }
 
   const RegList saves = descriptor->CalleeSavedRegisters();
   if (saves != 0) {
     // Save callee-saved registers.
     __ MultiPush(saves);
-    // kNumCalleeSaved includes the fp register, but the fp register
-    // is saved separately in TF.
-    int count = base::bits::CountPopulation32(saves);
-    DCHECK(kNumCalleeSaved == count + 1);
-    frame()->AllocateSavedCalleeRegisterSlots(count);
+    DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
   }
 }
 
@@ -1701,7 +1803,12 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          __ li(dst, Operand(src.ToInt32()));
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+            __ li(dst, Operand(src.ToInt32(), src.rmode()));
+          } else {
+            __ li(dst, Operand(src.ToInt32()));
+          }
           break;
         case Constant::kFloat32:
           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
@@ -1734,7 +1841,7 @@
       }
       if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
     } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         MemOperand dst = g.ToMemOperand(destination);
         __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
         __ sw(at, dst);
@@ -1744,27 +1851,27 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
-      DoubleRegister dst = destination->IsDoubleRegister()
+      DoubleRegister dst = destination->IsFPRegister()
                                ? g.ToDoubleRegister(destination)
                                : kScratchDoubleReg;
       __ Move(dst, src.ToFloat64());
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         __ sdc1(dst, g.ToMemOperand(destination));
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     FPURegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       FPURegister dst = g.ToDoubleRegister(destination);
       __ Move(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       __ sdc1(src, g.ToMemOperand(destination));
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       __ ldc1(g.ToDoubleRegister(destination), src);
     } else {
       FPURegister temp = kScratchDoubleReg;
@@ -1808,23 +1915,23 @@
     __ lw(temp_1, dst);
     __ sw(temp_0, dst);
     __ sw(temp_1, src);
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     FPURegister temp = kScratchDoubleReg;
     FPURegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       FPURegister dst = g.ToDoubleRegister(destination);
       __ Move(temp, src);
       __ Move(src, dst);
       __ Move(dst, temp);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
       __ Move(temp, src);
       __ ldc1(src, dst);
       __ sdc1(temp, dst);
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
     FPURegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
@@ -1850,13 +1957,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() {
-  // Unused on 32-bit ARM. Still exists on 64-bit arm.
-  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
-  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
-}
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
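
The ASSEMBLE_ATOMIC_* macros defined in this file bracket ordinary loads and stores with MIPS sync barriers: load-then-sync gives (at least) acquire semantics, and a store fenced by sync on both sides gives the sequentially consistent store that the machine-level AtomicStore operator asks for. As a rough C++11 analogy, not the code V8 emits:

#include <atomic>
#include <cstdint>

// lw rt, addr; sync          ~ acquire-or-stronger load
// sync; sw rt, addr; sync    ~ sequentially consistent store
uint32_t AtomicLoadWord32(const std::atomic<uint32_t>* cell) {
  return cell->load(std::memory_order_seq_cst);
}

void AtomicStoreWord32(std::atomic<uint32_t>* cell, uint32_t value) {
  cell->store(value, std::memory_order_seq_cst);
}
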
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index d85c2a7..5c36525 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -30,9 +30,13 @@
   V(MipsClz)                       \
   V(MipsCtz)                       \
   V(MipsPopcnt)                    \
+  V(MipsLsa)                       \
   V(MipsShl)                       \
   V(MipsShr)                       \
   V(MipsSar)                       \
+  V(MipsShlPair)                   \
+  V(MipsShrPair)                   \
+  V(MipsSarPair)                   \
   V(MipsExt)                       \
   V(MipsIns)                       \
   V(MipsRor)                       \
@@ -59,6 +63,8 @@
   V(MipsSqrtD)                     \
   V(MipsMaxD)                      \
   V(MipsMinD)                      \
+  V(MipsAddPair)                   \
+  V(MipsSubPair)                   \
   V(MipsMulPair)                   \
   V(MipsFloat32RoundDown)          \
   V(MipsFloat32RoundTruncate)      \
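
These V(...) lists are X-macros: the backend states each opcode exactly once and the shared instruction-codes machinery expands the list into the ArchOpcode enum, name tables, and switch cases. A minimal, self-contained sketch of the expansion technique (the macro names here are illustrative, not V8's):

// X-macro sketch: one list, several expansions.
#define MY_OPCODE_LIST(V) \
  V(MipsAdd)              \
  V(MipsLsa)              \
  V(MipsAddPair)

enum ArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  MY_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

const char* const kOpcodeNames[] = {
#define OPCODE_NAME(Name) #Name,
    MY_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
};
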
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index f86ffe7..cccb39a 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -395,27 +395,71 @@
   VisitRRO(this, kMipsSar, node);
 }
 
-void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+static void VisitInt32PairBinop(InstructionSelector* selector,
+                                InstructionCode opcode, Node* node) {
+  MipsOperandGenerator g(selector);
 
-void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32PairMul(Node* node) {
-  MipsOperandGenerator g(this);
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
   InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                  g.UseUniqueRegister(node->InputAt(1)),
                                  g.UseUniqueRegister(node->InputAt(2)),
                                  g.UseUniqueRegister(node->InputAt(3))};
+
   InstructionOperand outputs[] = {
       g.DefineAsRegister(node),
       g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-  Emit(kMipsMulPair, 2, outputs, 4, inputs);
+  selector->Emit(opcode, 2, outputs, 4, inputs);
 }
 
-void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  VisitInt32PairBinop(this, kMipsAddPair, node);
+}
 
-void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  VisitInt32PairBinop(this, kMipsSubPair, node);
+}
 
-void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  VisitInt32PairBinop(this, kMipsMulPair, node);
+}
+
+// Shared routine for multiple shift operations.
+static void VisitWord32PairShift(InstructionSelector* selector,
+                                 InstructionCode opcode, Node* node) {
+  MipsOperandGenerator g(selector);
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitWord32PairShift(this, kMipsShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitWord32PairShift(this, kMipsShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitWord32PairShift(this, kMipsSarPair, node);
+}
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitRRO(this, kMipsRor, node);
@@ -444,8 +488,32 @@
 
 void InstructionSelector::VisitInt32Add(Node* node) {
   MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
 
-  // TODO(plind): Consider multiply & add optimization from arm port.
+  // Select Lsa for (left + (left_of_right << imm)).
+  if (m.right().opcode() == IrOpcode::kWord32Shl &&
+      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    if (mright.right().HasValue()) {
+      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
+      return;
+    }
+  }
+
+  // Select Lsa for ((left_of_left << imm) + right).
+  if (m.left().opcode() == IrOpcode::kWord32Shl &&
+      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
+      return;
+    }
+  }
+
   VisitBinop(this, node, kMipsAdd);
 }
 
@@ -467,12 +535,9 @@
       return;
     }
     if (base::bits::IsPowerOfTwo32(value - 1)) {
-      InstructionOperand temp = g.TempRegister();
-      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(m.left().node()),
            g.TempImmediate(WhichPowerOf2(value - 1)));
-      Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
-           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
       return;
     }
     if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -654,17 +719,13 @@
   VisitRR(this, kMipsCvtSD, node);
 }
 
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, kArchTruncateDoubleToI, node);
-    case TruncationMode::kRoundToZero:
-      return VisitRR(this, kMipsTruncWD, node);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, kArchTruncateDoubleToI, node);
 }
 
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRR(this, kMipsTruncWD, node);
+}
 
 void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
   VisitRR(this, kMipsFloat64ExtractLowWord32, node);
@@ -693,6 +754,9 @@
   VisitRRR(this, kMipsSubS, node);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitRRR(this, kMipsSubS, node);
+}
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   MipsOperandGenerator g(this);
@@ -712,6 +776,9 @@
   VisitRRR(this, kMipsSubD, node);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitRRR(this, kMipsSubD, node);
+}
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kMipsMulS, node);
@@ -999,7 +1066,6 @@
 
 
 namespace {
-
 // Shared routine for multiple compare operations.
 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                          InstructionOperand left, InstructionOperand right,
@@ -1388,6 +1454,73 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kAtomicStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kAtomicStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicStoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+  }
+}
 
 // static
 MachineOperatorBuilder::Flags
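
Several selectors in this file now target kMipsLsa, which folds a shift into an add: the macro-assembler's Lsa(rd, rt, rs, sa) computes rt + (rs << sa), so a multiply by 2^k + 1 becomes a single Lsa of a value with itself. The value-level identity being exploited, as a hedged scalar restatement (the Lsa helper below is illustrative; the real selection operates on graph nodes):

#include <cstdint>

// Illustrative scalar model of what kMipsLsa computes.
uint32_t Lsa(uint32_t left, uint32_t shiftee, int shift) {
  return left + (shiftee << shift);
}

// The VisitInt32Mul strength reduction: x * (2^k + 1) == Lsa(x, x, k).
uint32_t MulByNine(uint32_t x) { return Lsa(x, x, 3); }  // x + (x << 3) == 9 * x
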
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index c6341b1..a7d2301 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -119,7 +119,7 @@
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
   }
 
@@ -359,7 +359,6 @@
 
 }  // namespace
 
-
 #define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
   do {                                                                        \
     auto result = i.Output##width##Register();                                \
@@ -367,7 +366,8 @@
     if (instr->InputAt(0)->IsRegister()) {                                    \
       auto offset = i.InputRegister(0);                                       \
       __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
-      __ Daddu(kScratchReg, i.InputRegister(2), offset);                      \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                       \
+      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                 \
       __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
     } else {                                                                  \
       int offset = static_cast<int>(i.InputOperand(0).immediate());           \
@@ -377,7 +377,6 @@
     __ bind(ool->exit());                                                     \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
   do {                                                                        \
     auto result = i.OutputRegister();                                         \
@@ -385,7 +384,8 @@
     if (instr->InputAt(0)->IsRegister()) {                                    \
       auto offset = i.InputRegister(0);                                       \
       __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
-      __ Daddu(kScratchReg, i.InputRegister(2), offset);                      \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                       \
+      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                 \
       __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
     } else {                                                                  \
       int offset = static_cast<int>(i.InputOperand(0).immediate());           \
@@ -395,7 +395,6 @@
     __ bind(ool->exit());                                                     \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
   do {                                                                 \
     Label done;                                                        \
@@ -403,7 +402,8 @@
       auto offset = i.InputRegister(0);                                \
       auto value = i.Input##width##Register(2);                        \
       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
-      __ Daddu(kScratchReg, i.InputRegister(3), offset);               \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                \
+      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
     } else {                                                           \
       int offset = static_cast<int>(i.InputOperand(0).immediate());    \
@@ -414,7 +414,6 @@
     __ bind(&done);                                                    \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
   do {                                                                 \
     Label done;                                                        \
@@ -422,7 +421,8 @@
       auto offset = i.InputRegister(0);                                \
       auto value = i.InputRegister(2);                                 \
       __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
-      __ Daddu(kScratchReg, i.InputRegister(3), offset);               \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                \
+      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
       __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
     } else {                                                           \
       int offset = static_cast<int>(i.InputOperand(0).immediate());    \
@@ -433,7 +433,6 @@
     __ bind(&done);                                                    \
   } while (0)
 
-
 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
   if (kArchVariant == kMips64r6) {                                             \
     __ cfc1(kScratchReg, FCSR);                                                \
@@ -484,13 +483,24 @@
     __ bind(&done);                                                           \
   }
 
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
+  do {                                                   \
+    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+    __ sync();                                           \
+  } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)         \
+  do {                                                   \
+    __ sync();                                           \
+    __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
+    __ sync();                                           \
+  } while (0)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(sp, fp);
   __ Pop(ra, fp);
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {}
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -539,7 +549,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   MipsOperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -576,6 +587,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!instr->InputAt(0)->IsImmediate());
+      __ Jump(i.InputRegister(0));
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -651,7 +670,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -775,6 +796,16 @@
     case kMips64DmodU:
       __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
+    case kMips64Dlsa:
+      DCHECK(instr->InputAt(2)->IsImmediate());
+      __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+              i.InputInt8(2));
+      break;
+    case kMips64Lsa:
+      DCHECK(instr->InputAt(2)->IsImmediate());
+      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputInt8(2));
+      break;
     case kMips64And:
       __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -1466,6 +1497,9 @@
     case kMips64Lw:
       __ lw(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kMips64Lwu:
+      __ lwu(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kMips64Ld:
       __ ld(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -1492,7 +1526,7 @@
       __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
       break;
     case kMips64Push:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
         __ Subu(sp, sp, Operand(kDoubleSize));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1507,7 +1541,7 @@
       break;
     }
     case kMips64StoreToStackSlot: {
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
       } else {
         __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1556,7 +1590,32 @@
     case kCheckedStoreFloat64:
       ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
       break;
+    case kAtomicLoadInt8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
+      break;
+    case kAtomicLoadUint8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
+      break;
+    case kAtomicLoadInt16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
+      break;
+    case kAtomicLoadUint16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
+      break;
+    case kAtomicLoadWord32:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
+      break;
+    case kAtomicStoreWord8:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
+      break;
+    case kAtomicStoreWord16:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
+      break;
+    case kAtomicStoreWord32:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
+      break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
 
 
@@ -1868,16 +1927,35 @@
   });
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
 
+void CodeGenerator::FinishFrame(Frame* frame) {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
-void CodeGenerator::AssemblePrologue() {
+  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+  if (saves_fpu != 0) {
+    int count = base::bits::CountPopulation32(saves_fpu);
+    DCHECK(kNumCalleeSavedFPU == count);
+    frame->AllocateSavedCalleeRegisterSlots(count *
+                                            (kDoubleSize / kPointerSize));
+  }
+
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    int count = base::bits::CountPopulation32(saves);
+    DCHECK(kNumCalleeSaved == count + 1);
+    frame->AllocateSavedCalleeRegisterSlots(count);
+  }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsCFunctionCall()) {
@@ -1890,7 +1968,8 @@
     }
   }
 
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots = frame()->GetSpillSlotCount();
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1901,32 +1980,25 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
-  if (stack_shrink_slots > 0) {
-    __ Dsubu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+  if (shrink_slots > 0) {
+    __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
   }
 
   const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
   if (saves_fpu != 0) {
     // Save callee-saved FPU registers.
     __ MultiPushFPU(saves_fpu);
-    int count = base::bits::CountPopulation32(saves_fpu);
-    DCHECK(kNumCalleeSavedFPU == count);
-    frame()->AllocateSavedCalleeRegisterSlots(count *
-                                              (kDoubleSize / kPointerSize));
+    DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
   }
 
   const RegList saves = descriptor->CalleeSavedRegisters();
   if (saves != 0) {
     // Save callee-saved registers.
     __ MultiPush(saves);
-    // kNumCalleeSaved includes the fp register, but the fp register
-    // is saved separately in TF.
-    int count = base::bits::CountPopulation32(saves);
-    DCHECK(kNumCalleeSaved == count + 1);
-    frame()->AllocateSavedCalleeRegisterSlots(count);
+    DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
   }
 }
 
@@ -1997,13 +2069,22 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          __ li(dst, Operand(src.ToInt32()));
+          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+            __ li(dst, Operand(src.ToInt32(), src.rmode()));
+          } else {
+            __ li(dst, Operand(src.ToInt32()));
+          }
           break;
         case Constant::kFloat32:
           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
           break;
         case Constant::kInt64:
-          __ li(dst, Operand(src.ToInt64()));
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+            __ li(dst, Operand(src.ToInt64(), src.rmode()));
+          } else {
+            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            __ li(dst, Operand(src.ToInt64()));
+          }
           break;
         case Constant::kFloat64:
           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
@@ -2030,7 +2111,7 @@
       }
       if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
     } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         MemOperand dst = g.ToMemOperand(destination);
         __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
         __ sw(at, dst);
@@ -2040,27 +2121,27 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
-      DoubleRegister dst = destination->IsDoubleRegister()
+      DoubleRegister dst = destination->IsFPRegister()
                                ? g.ToDoubleRegister(destination)
                                : kScratchDoubleReg;
       __ Move(dst, src.ToFloat64());
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         __ sdc1(dst, g.ToMemOperand(destination));
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     FPURegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       FPURegister dst = g.ToDoubleRegister(destination);
       __ Move(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       __ sdc1(src, g.ToMemOperand(destination));
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       __ ldc1(g.ToDoubleRegister(destination), src);
     } else {
       FPURegister temp = kScratchDoubleReg;
@@ -2104,23 +2185,23 @@
     __ ld(temp_1, dst);
     __ sd(temp_0, dst);
     __ sd(temp_1, src);
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     FPURegister temp = kScratchDoubleReg;
     FPURegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       FPURegister dst = g.ToDoubleRegister(destination);
       __ Move(temp, src);
       __ Move(src, dst);
       __ Move(dst, temp);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
       __ Move(temp, src);
       __ ldc1(src, dst);
       __ sdc1(temp, dst);
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
     FPURegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
@@ -2146,13 +2227,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() {
-  // Unused on 32-bit ARM. Still exists on 64-bit arm.
-  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
-  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
-}
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
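
The checked load/store macros in this file gained an explicit And with 0xffffffff before the Daddu: the bounds check compares the offset as a 32-bit quantity, but on mips64 the register may carry stale upper bits, and feeding it unmasked into a 64-bit add could form an address outside the checked buffer. The fix is plain zero-extension, restated here as scalar arithmetic (hedged, not the emitted code):

#include <cstdint>

// Zero-extend the 32-bit offset before forming the 64-bit address.
uint64_t CheckedAddress(uint64_t base, uint64_t offset_reg) {
  uint64_t offset = offset_reg & 0xffffffffull;  // And(kScratchReg, offset, 0xffffffff)
  return base + offset;                          // Daddu(kScratchReg, base, kScratchReg)
}
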
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 9e94c09..6fd321e 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -36,6 +36,8 @@
   V(Mips64Nor)                      \
   V(Mips64Xor)                      \
   V(Mips64Clz)                      \
+  V(Mips64Lsa)                      \
+  V(Mips64Dlsa)                     \
   V(Mips64Shl)                      \
   V(Mips64Shr)                      \
   V(Mips64Sar)                      \
@@ -114,9 +116,10 @@
   V(Mips64Lh)                       \
   V(Mips64Lhu)                      \
   V(Mips64Sh)                       \
-  V(Mips64Ld)                       \
   V(Mips64Lw)                       \
+  V(Mips64Lwu)                      \
   V(Mips64Sw)                       \
+  V(Mips64Ld)                       \
   V(Mips64Sd)                       \
   V(Mips64Lwc1)                     \
   V(Mips64Swc1)                     \
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 5e2b5f2..3516e76 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -158,7 +158,7 @@
       opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
       break;
     case MachineRepresentation::kWord32:
-      opcode = kMips64Lw;
+      opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
       break;
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord64:
@@ -611,14 +611,66 @@
 
 void InstructionSelector::VisitInt32Add(Node* node) {
   Mips64OperandGenerator g(this);
-  // TODO(plind): Consider multiply & add optimization from arm port.
+  Int32BinopMatcher m(node);
+
+  // Select Lsa for (left + (left_of_right << imm)).
+  if (m.right().opcode() == IrOpcode::kWord32Shl &&
+      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    if (mright.right().HasValue()) {
+      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
+      return;
+    }
+  }
+
+  // Select Lsa for ((left_of_left << imm) + right).
+  if (m.left().opcode() == IrOpcode::kWord32Shl &&
+      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+      Emit(kMips64Lsa, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.TempImmediate(shift_value));
+      return;
+    }
+  }
   VisitBinop(this, node, kMips64Add);
 }
 
 
 void InstructionSelector::VisitInt64Add(Node* node) {
   Mips64OperandGenerator g(this);
-  // TODO(plind): Consider multiply & add optimization from arm port.
+  Int64BinopMatcher m(node);
+
+  // Select Dlsa for (left + (left_of_right << imm)).
+  if (m.right().opcode() == IrOpcode::kWord64Shl &&
+      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+    Int64BinopMatcher mright(m.right().node());
+    if (mright.right().HasValue()) {
+      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+      Emit(kMips64Dlsa, g.DefineAsRegister(node),
+           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+           g.TempImmediate(shift_value));
+      return;
+    }
+  }
+
+  // Select Dlsa for ((left_of_left << imm) + right).
+  if (m.left().opcode() == IrOpcode::kWord64Shl &&
+      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+    Int64BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+      Emit(kMips64Dlsa, g.DefineAsRegister(node),
+           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+           g.TempImmediate(shift_value));
+      return;
+    }
+  }
+
   VisitBinop(this, node, kMips64Dadd);
 }
 
@@ -645,12 +697,9 @@
       return;
     }
     if (base::bits::IsPowerOfTwo32(value - 1)) {
-      InstructionOperand temp = g.TempRegister();
-      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(m.left().node()),
            g.TempImmediate(WhichPowerOf2(value - 1)));
-      Emit(kMips64Add | AddressingModeField::encode(kMode_None),
-           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
       return;
     }
     if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -705,12 +754,10 @@
       return;
     }
     if (base::bits::IsPowerOfTwo32(value - 1)) {
-      InstructionOperand temp = g.TempRegister();
-      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
-           g.UseRegister(m.left().node()),
+      // The Dlsa macro handles out-of-range shift amounts.
+      Emit(kMips64Dlsa, g.DefineAsRegister(node),
+           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
            g.TempImmediate(WhichPowerOf2(value - 1)));
-      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
-           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
       return;
     }
     if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -1047,17 +1094,13 @@
   VisitRR(this, kMips64CvtSD, node);
 }
 
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, kArchTruncateDoubleToI, node);
-    case TruncationMode::kRoundToZero:
-      return VisitRR(this, kMips64TruncWD, node);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, kArchTruncateDoubleToI, node);
 }
 
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRR(this, kMips64TruncWD, node);
+}
 
 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
   VisitRR(this, kMips64CvtSL, node);
@@ -1116,6 +1159,9 @@
   VisitRRR(this, kMips64SubS, node);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitRRR(this, kMips64SubS, node);
+}
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   Mips64OperandGenerator g(this);
@@ -1135,6 +1181,9 @@
   VisitRRR(this, kMips64SubD, node);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitRRR(this, kMips64SubD, node);
+}
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kMips64MulS, node);
@@ -1916,6 +1965,73 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kAtomicStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kAtomicStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicStoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    InstructionOperand addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+         addr_reg, g.TempImmediate(0), g.UseRegister(value));
+  }
+}
 
 // static
 MachineOperatorBuilder::Flags
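
VisitAtomicLoad and VisitAtomicStore pick between two addressing strategies: when the index fits the instruction's immediate field it is folded into a single kMode_MRI access, otherwise base + index is materialized in a scratch register first and the access uses offset 0. A hedged scalar restatement of the split (FitsImmediate16 is a stand-in for MipsOperandGenerator::CanBeImmediate):

#include <cstdint>

// Stand-in for the operand generator's immediate check (16-bit signed field).
bool FitsImmediate16(int64_t index) { return index >= -32768 && index <= 32767; }

uint64_t EffectiveAddress(uint64_t base, int64_t index) {
  if (FitsImmediate16(index)) {
    // One access with the index folded in: op(base_reg, #index).
    return base + static_cast<uint64_t>(index);
  }
  // Dadd into a temp register, then access at offset 0: op(addr_reg, #0).
  uint64_t addr_reg = base + static_cast<uint64_t>(index);
  return addr_reg + 0;
}
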
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index 37d0e1a..6238be3 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -253,7 +253,8 @@
 typedef BinopMatcher<Float32Matcher, Float32Matcher> Float32BinopMatcher;
 typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
 typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
-
+typedef BinopMatcher<HeapObjectMatcher, HeapObjectMatcher>
+    HeapObjectBinopMatcher;
 
 template <class BinopMatcher, IrOpcode::Value kMulOpcode,
           IrOpcode::Value kShiftOpcode>
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index ac9cc34..2cf899b 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -158,8 +158,9 @@
 
 
 // static
-void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
-  node->ReplaceInput(FirstControlIndex(node), control);
+void NodeProperties::ReplaceControlInput(Node* node, Node* control, int index) {
+  DCHECK(index < node->op()->ControlInputCount());
+  node->ReplaceInput(FirstControlIndex(node) + index, control);
 }
 
 
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 58005a7..78ffd1d 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -81,7 +81,7 @@
 
   static void ReplaceValueInput(Node* node, Node* value, int index);
   static void ReplaceContextInput(Node* node, Node* context);
-  static void ReplaceControlInput(Node* node, Node* control);
+  static void ReplaceControlInput(Node* node, Node* control, int index = 0);
   static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
   static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
   static void RemoveFrameStateInput(Node* node, int index);
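
ReplaceControlInput now takes an input index (defaulting to 0 so existing callers are unchanged), and the .cc side DCHECKs it against the operator's declared control-input count. The invariant being relied on is the node input layout, sketched here with a stand-in type (not the real Node class):

#include <cassert>
#include <vector>

// Node inputs are laid out [values..., context, frame states..., effects...,
// controls...], so control input i lives at FirstControlIndex(node) + i.
struct FakeNode {
  std::vector<int> inputs;
  int first_control_index = 0;
  int control_input_count = 0;
  void ReplaceControlInput(int control, int index = 0) {
    assert(index < control_input_count);
    inputs[first_control_index + index] = control;
  }
};
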
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index b038d15..ce5087c 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -32,21 +32,22 @@
   V(End)
 
 // Opcodes for constant operators.
-#define CONSTANT_OP_LIST(V) \
-  V(Int32Constant)          \
-  V(Int64Constant)          \
-  V(Float32Constant)        \
-  V(Float64Constant)        \
-  V(ExternalConstant)       \
-  V(NumberConstant)         \
-  V(HeapConstant)
+#define CONSTANT_OP_LIST(V)   \
+  V(Int32Constant)            \
+  V(Int64Constant)            \
+  V(Float32Constant)          \
+  V(Float64Constant)          \
+  V(ExternalConstant)         \
+  V(NumberConstant)           \
+  V(HeapConstant)             \
+  V(RelocatableInt32Constant) \
+  V(RelocatableInt64Constant)
 
 #define INNER_OP_LIST(V) \
   V(Select)              \
   V(Phi)                 \
-  V(EffectSet)           \
   V(EffectPhi)           \
-  V(Guard)               \
+  V(CheckPoint)          \
   V(BeginRegion)         \
   V(FinishRegion)        \
   V(FrameState)          \
@@ -150,7 +151,6 @@
   V(JSForInStep)            \
   V(JSLoadMessage)          \
   V(JSStoreMessage)         \
-  V(JSYield)                \
   V(JSStackCheck)
 
 #define JS_OP_LIST(V)     \
@@ -194,16 +194,18 @@
   V(NumberToInt32)                 \
   V(NumberToUint32)                \
   V(NumberIsHoleNaN)               \
-  V(PlainPrimitiveToNumber)        \
   V(StringToNumber)                \
+  V(ChangeTaggedSignedToInt32)     \
   V(ChangeTaggedToInt32)           \
   V(ChangeTaggedToUint32)          \
   V(ChangeTaggedToFloat64)         \
+  V(ChangeInt31ToTaggedSigned)     \
   V(ChangeInt32ToTagged)           \
   V(ChangeUint32ToTagged)          \
   V(ChangeFloat64ToTagged)         \
-  V(ChangeBoolToBit)               \
-  V(ChangeBitToBool)               \
+  V(ChangeTaggedToBit)             \
+  V(ChangeBitToTagged)             \
+  V(TruncateTaggedToWord32)        \
   V(Allocate)                      \
   V(LoadField)                     \
   V(LoadBuffer)                    \
@@ -211,10 +213,13 @@
   V(StoreField)                    \
   V(StoreBuffer)                   \
   V(StoreElement)                  \
+  V(ObjectIsCallable)              \
   V(ObjectIsNumber)                \
   V(ObjectIsReceiver)              \
   V(ObjectIsSmi)                   \
-  V(ObjectIsUndetectable)
+  V(ObjectIsString)                \
+  V(ObjectIsUndetectable)          \
+  V(TypeGuard)
 
 // Opcodes for Machine-level operators.
 #define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -282,6 +287,8 @@
   V(Int64Mod)                   \
   V(Uint64Div)                  \
   V(Uint64Mod)                  \
+  V(BitcastWordToTagged)        \
+  V(TruncateFloat64ToWord32)    \
   V(ChangeFloat32ToFloat64)     \
   V(ChangeFloat64ToInt32)       \
   V(ChangeFloat64ToUint32)      \
@@ -297,8 +304,8 @@
   V(ChangeUint32ToFloat64)      \
   V(ChangeUint32ToUint64)       \
   V(TruncateFloat64ToFloat32)   \
-  V(TruncateFloat64ToInt32)     \
   V(TruncateInt64ToInt32)       \
+  V(RoundFloat64ToInt32)        \
   V(RoundInt32ToFloat32)        \
   V(RoundInt64ToFloat32)        \
   V(RoundInt64ToFloat64)        \
@@ -311,6 +318,7 @@
   V(BitcastInt64ToFloat64)      \
   V(Float32Add)                 \
   V(Float32Sub)                 \
+  V(Float32SubPreserveNan)      \
   V(Float32Mul)                 \
   V(Float32Div)                 \
   V(Float32Max)                 \
@@ -320,6 +328,7 @@
   V(Float32RoundDown)           \
   V(Float64Add)                 \
   V(Float64Sub)                 \
+  V(Float64SubPreserveNan)      \
   V(Float64Mul)                 \
   V(Float64Div)                 \
   V(Float64Mod)                 \
@@ -349,12 +358,198 @@
   V(Int32PairMul)               \
   V(Word32PairShl)              \
   V(Word32PairShr)              \
-  V(Word32PairSar)
+  V(Word32PairSar)              \
+  V(AtomicLoad)                 \
+  V(AtomicStore)
 
-#define VALUE_OP_LIST(V) \
-  COMMON_OP_LIST(V)      \
-  SIMPLIFIED_OP_LIST(V)  \
-  MACHINE_OP_LIST(V)     \
+#define MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
+  V(CreateFloat32x4)                        \
+  V(Float32x4ReplaceLane)                   \
+  V(Float32x4Abs)                           \
+  V(Float32x4Neg)                           \
+  V(Float32x4Sqrt)                          \
+  V(Float32x4RecipApprox)                   \
+  V(Float32x4RecipSqrtApprox)               \
+  V(Float32x4Add)                           \
+  V(Float32x4Sub)                           \
+  V(Float32x4Mul)                           \
+  V(Float32x4Div)                           \
+  V(Float32x4Min)                           \
+  V(Float32x4Max)                           \
+  V(Float32x4MinNum)                        \
+  V(Float32x4MaxNum)                        \
+  V(Float32x4Equal)                         \
+  V(Float32x4NotEqual)                      \
+  V(Float32x4LessThan)                      \
+  V(Float32x4LessThanOrEqual)               \
+  V(Float32x4GreaterThan)                   \
+  V(Float32x4GreaterThanOrEqual)            \
+  V(Float32x4Select)                        \
+  V(Float32x4Swizzle)                       \
+  V(Float32x4Shuffle)                       \
+  V(Float32x4FromInt32x4)                   \
+  V(Float32x4FromUint32x4)                  \
+  V(CreateInt32x4)                          \
+  V(Int32x4ReplaceLane)                     \
+  V(Int32x4Neg)                             \
+  V(Int32x4Add)                             \
+  V(Int32x4Sub)                             \
+  V(Int32x4Mul)                             \
+  V(Int32x4Min)                             \
+  V(Int32x4Max)                             \
+  V(Int32x4ShiftLeftByScalar)               \
+  V(Int32x4ShiftRightByScalar)              \
+  V(Int32x4Equal)                           \
+  V(Int32x4NotEqual)                        \
+  V(Int32x4LessThan)                        \
+  V(Int32x4LessThanOrEqual)                 \
+  V(Int32x4GreaterThan)                     \
+  V(Int32x4GreaterThanOrEqual)              \
+  V(Int32x4Select)                          \
+  V(Int32x4Swizzle)                         \
+  V(Int32x4Shuffle)                         \
+  V(Int32x4FromFloat32x4)                   \
+  V(Uint32x4Min)                            \
+  V(Uint32x4Max)                            \
+  V(Uint32x4ShiftLeftByScalar)              \
+  V(Uint32x4ShiftRightByScalar)             \
+  V(Uint32x4LessThan)                       \
+  V(Uint32x4LessThanOrEqual)                \
+  V(Uint32x4GreaterThan)                    \
+  V(Uint32x4GreaterThanOrEqual)             \
+  V(Uint32x4FromFloat32x4)                  \
+  V(CreateBool32x4)                         \
+  V(Bool32x4ReplaceLane)                    \
+  V(Bool32x4And)                            \
+  V(Bool32x4Or)                             \
+  V(Bool32x4Xor)                            \
+  V(Bool32x4Not)                            \
+  V(Bool32x4Swizzle)                        \
+  V(Bool32x4Shuffle)                        \
+  V(Bool32x4Equal)                          \
+  V(Bool32x4NotEqual)                       \
+  V(CreateInt16x8)                          \
+  V(Int16x8ReplaceLane)                     \
+  V(Int16x8Neg)                             \
+  V(Int16x8Add)                             \
+  V(Int16x8AddSaturate)                     \
+  V(Int16x8Sub)                             \
+  V(Int16x8SubSaturate)                     \
+  V(Int16x8Mul)                             \
+  V(Int16x8Min)                             \
+  V(Int16x8Max)                             \
+  V(Int16x8ShiftLeftByScalar)               \
+  V(Int16x8ShiftRightByScalar)              \
+  V(Int16x8Equal)                           \
+  V(Int16x8NotEqual)                        \
+  V(Int16x8LessThan)                        \
+  V(Int16x8LessThanOrEqual)                 \
+  V(Int16x8GreaterThan)                     \
+  V(Int16x8GreaterThanOrEqual)              \
+  V(Int16x8Select)                          \
+  V(Int16x8Swizzle)                         \
+  V(Int16x8Shuffle)                         \
+  V(Uint16x8AddSaturate)                    \
+  V(Uint16x8SubSaturate)                    \
+  V(Uint16x8Min)                            \
+  V(Uint16x8Max)                            \
+  V(Uint16x8ShiftLeftByScalar)              \
+  V(Uint16x8ShiftRightByScalar)             \
+  V(Uint16x8LessThan)                       \
+  V(Uint16x8LessThanOrEqual)                \
+  V(Uint16x8GreaterThan)                    \
+  V(Uint16x8GreaterThanOrEqual)             \
+  V(CreateBool16x8)                         \
+  V(Bool16x8ReplaceLane)                    \
+  V(Bool16x8And)                            \
+  V(Bool16x8Or)                             \
+  V(Bool16x8Xor)                            \
+  V(Bool16x8Not)                            \
+  V(Bool16x8Swizzle)                        \
+  V(Bool16x8Shuffle)                        \
+  V(Bool16x8Equal)                          \
+  V(Bool16x8NotEqual)                       \
+  V(CreateInt8x16)                          \
+  V(Int8x16ReplaceLane)                     \
+  V(Int8x16Neg)                             \
+  V(Int8x16Add)                             \
+  V(Int8x16AddSaturate)                     \
+  V(Int8x16Sub)                             \
+  V(Int8x16SubSaturate)                     \
+  V(Int8x16Mul)                             \
+  V(Int8x16Min)                             \
+  V(Int8x16Max)                             \
+  V(Int8x16ShiftLeftByScalar)               \
+  V(Int8x16ShiftRightByScalar)              \
+  V(Int8x16Equal)                           \
+  V(Int8x16NotEqual)                        \
+  V(Int8x16LessThan)                        \
+  V(Int8x16LessThanOrEqual)                 \
+  V(Int8x16GreaterThan)                     \
+  V(Int8x16GreaterThanOrEqual)              \
+  V(Int8x16Select)                          \
+  V(Int8x16Swizzle)                         \
+  V(Int8x16Shuffle)                         \
+  V(Uint8x16AddSaturate)                    \
+  V(Uint8x16SubSaturate)                    \
+  V(Uint8x16Min)                            \
+  V(Uint8x16Max)                            \
+  V(Uint8x16ShiftLeftByScalar)              \
+  V(Uint8x16ShiftRightByScalar)             \
+  V(Uint8x16LessThan)                       \
+  V(Uint8x16LessThanOrEqual)                \
+  V(Uint8x16GreaterThan)                    \
+  V(Uint8x16GreaterThanOrEqual)             \
+  V(CreateBool8x16)                         \
+  V(Bool8x16ReplaceLane)                    \
+  V(Bool8x16And)                            \
+  V(Bool8x16Or)                             \
+  V(Bool8x16Xor)                            \
+  V(Bool8x16Not)                            \
+  V(Bool8x16Swizzle)                        \
+  V(Bool8x16Shuffle)                        \
+  V(Bool8x16Equal)                          \
+  V(Bool8x16NotEqual)                       \
+  V(Simd128Load)                            \
+  V(Simd128Load1)                           \
+  V(Simd128Load2)                           \
+  V(Simd128Load3)                           \
+  V(Simd128Store)                           \
+  V(Simd128Store1)                          \
+  V(Simd128Store2)                          \
+  V(Simd128Store3)                          \
+  V(Simd128And)                             \
+  V(Simd128Or)                              \
+  V(Simd128Xor)                             \
+  V(Simd128Not)
+
+#define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
+  V(Float32x4ExtractLane)                  \
+  V(Int32x4ExtractLane)                    \
+  V(Int16x8ExtractLane)                    \
+  V(Int8x16ExtractLane)
+
+#define MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
+  V(Bool32x4ExtractLane)                    \
+  V(Bool32x4AnyTrue)                        \
+  V(Bool32x4AllTrue)                        \
+  V(Bool16x8ExtractLane)                    \
+  V(Bool16x8AnyTrue)                        \
+  V(Bool16x8AllTrue)                        \
+  V(Bool8x16ExtractLane)                    \
+  V(Bool8x16AnyTrue)                        \
+  V(Bool8x16AllTrue)
+
+#define MACHINE_SIMD_OP_LIST(V)       \
+  MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
+  MACHINE_SIMD_RETURN_NUM_OP_LIST(V)  \
+  MACHINE_SIMD_RETURN_BOOL_OP_LIST(V)
+
+#define VALUE_OP_LIST(V)  \
+  COMMON_OP_LIST(V)       \
+  SIMPLIFIED_OP_LIST(V)   \
+  MACHINE_OP_LIST(V)      \
+  MACHINE_SIMD_OP_LIST(V) \
   JS_OP_LIST(V)
 
 // The combination of all operators at all levels and the common operators.
@@ -400,7 +595,7 @@
 
   // Returns true if opcode for constant operator.
   static bool IsConstantOpcode(Value value) {
-    return kInt32Constant <= value && value <= kHeapConstant;
+    return kInt32Constant <= value && value <= kRelocatableInt64Constant;
   }
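// Note: not part of the patch. IsConstantOpcode relies on CONSTANT_OP_LIST
// expanding in declaration order, so adding the relocatable constants at the
// end of that list (see opcodes.h above) moves the upper bound here. A
// hypothetical compile-time guard for the invariant:
//
//   static_assert(IrOpcode::kHeapConstant < IrOpcode::kRelocatableInt64Constant,
//                 "relocatable constants must close the constant range");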
 
   static bool IsPhiOpcode(Value value) {
diff --git a/src/compiler/pipeline-statistics.h b/src/compiler/pipeline-statistics.h
index 2b6563d..c52c61c 100644
--- a/src/compiler/pipeline-statistics.h
+++ b/src/compiler/pipeline-statistics.h
@@ -7,6 +7,8 @@
 
 #include <string>
 
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/smart-pointers.h"
 #include "src/compilation-statistics.h"
 #include "src/compiler/zone-pool.h"
 
@@ -22,6 +24,7 @@
   ~PipelineStatistics();
 
   void BeginPhaseKind(const char* phase_kind_name);
+  void EndPhaseKind();
 
  private:
   size_t OuterZoneSize() {
@@ -43,7 +46,6 @@
   };
 
   bool InPhaseKind() { return !phase_kind_stats_.scope_.is_empty(); }
-  void EndPhaseKind();
 
   friend class PhaseScope;
   bool InPhase() { return !phase_stats_.scope_.is_empty(); }
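// Note: not part of the patch. EndPhaseKind() moves to the public interface
// so that PipelineData (in pipeline.cc below) can close a phase kind
// explicitly on early bailout, as in PipelineImpl::CreateGraph:
//
//   if (data->compilation_failed()) {
//     data->EndPhaseKind();
//     return false;
//   }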
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 1d7e967..82583e9 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -14,20 +14,20 @@
 #include "src/compiler/basic-block-instrumentor.h"
 #include "src/compiler/branch-elimination.h"
 #include "src/compiler/bytecode-graph-builder.h"
-#include "src/compiler/change-lowering.h"
 #include "src/compiler/code-generator.h"
 #include "src/compiler/common-operator-reducer.h"
 #include "src/compiler/control-flow-optimizer.h"
 #include "src/compiler/dead-code-elimination.h"
-#include "src/compiler/escape-analysis.h"
+#include "src/compiler/effect-control-linearizer.h"
 #include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/escape-analysis.h"
 #include "src/compiler/frame-elider.h"
 #include "src/compiler/graph-replay.h"
 #include "src/compiler/graph-trimmer.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/greedy-allocator.h"
-#include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
+#include "src/compiler/instruction.h"
 #include "src/compiler/js-builtin-reducer.h"
 #include "src/compiler/js-call-reducer.h"
 #include "src/compiler/js-context-specialization.h"
@@ -45,24 +45,27 @@
 #include "src/compiler/loop-analysis.h"
 #include "src/compiler/loop-peeling.h"
 #include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/memory-optimizer.h"
 #include "src/compiler/move-optimizer.h"
 #include "src/compiler/osr.h"
 #include "src/compiler/pipeline-statistics.h"
-#include "src/compiler/register-allocator.h"
 #include "src/compiler/register-allocator-verifier.h"
+#include "src/compiler/register-allocator.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
 #include "src/compiler/select-lowering.h"
 #include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-operator.h"
 #include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/compiler/tail-call-optimization.h"
 #include "src/compiler/type-hint-analyzer.h"
 #include "src/compiler/typer.h"
 #include "src/compiler/value-numbering-reducer.h"
 #include "src/compiler/verifier.h"
 #include "src/compiler/zone-pool.h"
+#include "src/isolate-inl.h"
 #include "src/ostreams.h"
+#include "src/parsing/parser.h"
 #include "src/register-configuration.h"
 #include "src/type-info.h"
 #include "src/utils.h"
@@ -78,31 +81,19 @@
                PipelineStatistics* pipeline_statistics)
       : isolate_(info->isolate()),
         info_(info),
+        debug_name_(info_->GetDebugName()),
         outer_zone_(info_->zone()),
         zone_pool_(zone_pool),
         pipeline_statistics_(pipeline_statistics),
-        compilation_failed_(false),
-        code_(Handle<Code>::null()),
         graph_zone_scope_(zone_pool_),
         graph_zone_(graph_zone_scope_.zone()),
-        graph_(nullptr),
-        loop_assignment_(nullptr),
-        simplified_(nullptr),
-        machine_(nullptr),
-        common_(nullptr),
-        javascript_(nullptr),
-        jsgraph_(nullptr),
-        schedule_(nullptr),
         instruction_zone_scope_(zone_pool_),
         instruction_zone_(instruction_zone_scope_.zone()),
-        sequence_(nullptr),
-        frame_(nullptr),
         register_allocation_zone_scope_(zone_pool_),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()),
-        register_allocation_data_(nullptr) {
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
     PhaseScope scope(pipeline_statistics, "init pipeline data");
     graph_ = new (graph_zone_) Graph(graph_zone_);
-    source_positions_.Reset(new SourcePositionTable(graph_));
+    source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
     simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
     machine_ = new (graph_zone_) MachineOperatorBuilder(
         graph_zone_, MachineType::PointerRepresentation(),
@@ -113,62 +104,50 @@
         JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
   }
 
+  // For WASM compile entry point.
+  PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+               SourcePositionTable* source_positions)
+      : isolate_(info->isolate()),
+        info_(info),
+        debug_name_(info_->GetDebugName()),
+        zone_pool_(zone_pool),
+        graph_zone_scope_(zone_pool_),
+        graph_(graph),
+        source_positions_(source_positions),
+        instruction_zone_scope_(zone_pool_),
+        instruction_zone_(instruction_zone_scope_.zone()),
+        register_allocation_zone_scope_(zone_pool_),
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+
   // For machine graph testing entry point.
   PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
                Schedule* schedule)
       : isolate_(info->isolate()),
         info_(info),
-        outer_zone_(nullptr),
+        debug_name_(info_->GetDebugName()),
         zone_pool_(zone_pool),
-        pipeline_statistics_(nullptr),
-        compilation_failed_(false),
-        code_(Handle<Code>::null()),
         graph_zone_scope_(zone_pool_),
-        graph_zone_(nullptr),
         graph_(graph),
-        source_positions_(new SourcePositionTable(graph_)),
-        loop_assignment_(nullptr),
-        simplified_(nullptr),
-        machine_(nullptr),
-        common_(nullptr),
-        javascript_(nullptr),
-        jsgraph_(nullptr),
+        source_positions_(new (info->zone()) SourcePositionTable(graph_)),
         schedule_(schedule),
         instruction_zone_scope_(zone_pool_),
         instruction_zone_(instruction_zone_scope_.zone()),
-        sequence_(nullptr),
-        frame_(nullptr),
         register_allocation_zone_scope_(zone_pool_),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()),
-        register_allocation_data_(nullptr) {}
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
 
   // For register allocation testing entry point.
   PipelineData(ZonePool* zone_pool, CompilationInfo* info,
                InstructionSequence* sequence)
       : isolate_(info->isolate()),
         info_(info),
-        outer_zone_(nullptr),
+        debug_name_(info_->GetDebugName()),
         zone_pool_(zone_pool),
-        pipeline_statistics_(nullptr),
-        compilation_failed_(false),
-        code_(Handle<Code>::null()),
         graph_zone_scope_(zone_pool_),
-        graph_zone_(nullptr),
-        graph_(nullptr),
-        loop_assignment_(nullptr),
-        simplified_(nullptr),
-        machine_(nullptr),
-        common_(nullptr),
-        javascript_(nullptr),
-        jsgraph_(nullptr),
-        schedule_(nullptr),
         instruction_zone_scope_(zone_pool_),
         instruction_zone_(sequence->zone()),
         sequence_(sequence),
-        frame_(nullptr),
         register_allocation_zone_scope_(zone_pool_),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()),
-        register_allocation_data_(nullptr) {}
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
 
   ~PipelineData() {
     DeleteRegisterAllocationZone();
@@ -193,9 +172,7 @@
 
   Zone* graph_zone() const { return graph_zone_; }
   Graph* graph() const { return graph_; }
-  SourcePositionTable* source_positions() const {
-    return source_positions_.get();
-  }
+  SourcePositionTable* source_positions() const { return source_positions_; }
   MachineOperatorBuilder* machine() const { return machine_; }
   CommonOperatorBuilder* common() const { return common_; }
   JSOperatorBuilder* javascript() const { return javascript_; }
@@ -224,6 +201,7 @@
     DCHECK(!schedule_);
     schedule_ = schedule;
   }
+  void reset_schedule() { schedule_ = nullptr; }
 
   Zone* instruction_zone() const { return instruction_zone_; }
   InstructionSequence* sequence() const { return sequence_; }
@@ -234,14 +212,24 @@
     return register_allocation_data_;
   }
 
+  BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
+  void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
+    profiler_data_ = profiler_data;
+  }
+
+  std::string const& source_position_output() const {
+    return source_position_output_;
+  }
+  void set_source_position_output(std::string const& source_position_output) {
+    source_position_output_ = source_position_output;
+  }
+
   void DeleteGraphZone() {
-    // Destroy objects with destructors first.
-    source_positions_.Reset(nullptr);
     if (graph_zone_ == nullptr) return;
-    // Destroy zone and clear pointers.
     graph_zone_scope_.Destroy();
     graph_zone_ = nullptr;
     graph_ = nullptr;
+    source_positions_ = nullptr;
     loop_assignment_ = nullptr;
     type_hint_analysis_ = nullptr;
     simplified_ = nullptr;
@@ -288,42 +276,53 @@
     if (descriptor != nullptr) {
       fixed_frame_size = CalculateFixedFrameSize(descriptor);
     }
-    frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+    frame_ = new (instruction_zone()) Frame(fixed_frame_size);
   }
 
   void InitializeRegisterAllocationData(const RegisterConfiguration* config,
-                                        CallDescriptor* descriptor,
-                                        const char* debug_name) {
+                                        CallDescriptor* descriptor) {
     DCHECK(register_allocation_data_ == nullptr);
     register_allocation_data_ = new (register_allocation_zone())
         RegisterAllocationData(config, register_allocation_zone(), frame(),
-                               sequence(), debug_name);
+                               sequence(), debug_name_.get());
+  }
+
+  void BeginPhaseKind(const char* phase_kind_name) {
+    if (pipeline_statistics() != nullptr) {
+      pipeline_statistics()->BeginPhaseKind(phase_kind_name);
+    }
+  }
+
+  void EndPhaseKind() {
+    if (pipeline_statistics() != nullptr) {
+      pipeline_statistics()->EndPhaseKind();
+    }
   }
 
  private:
-  Isolate* isolate_;
-  CompilationInfo* info_;
-  Zone* outer_zone_;
+  Isolate* const isolate_;
+  CompilationInfo* const info_;
+  base::SmartArrayPointer<char> debug_name_;
+  Zone* outer_zone_ = nullptr;
   ZonePool* const zone_pool_;
-  PipelineStatistics* pipeline_statistics_;
-  bool compilation_failed_;
-  Handle<Code> code_;
+  PipelineStatistics* pipeline_statistics_ = nullptr;
+  bool compilation_failed_ = false;
+  Handle<Code> code_ = Handle<Code>::null();
 
   // All objects in the following group of fields are allocated in graph_zone_.
   // They are all set to nullptr when the graph_zone_ is destroyed.
   ZonePool::Scope graph_zone_scope_;
-  Zone* graph_zone_;
-  Graph* graph_;
-  // TODO(dcarney): make this into a ZoneObject.
-  base::SmartPointer<SourcePositionTable> source_positions_;
-  LoopAssignmentAnalysis* loop_assignment_;
+  Zone* graph_zone_ = nullptr;
+  Graph* graph_ = nullptr;
+  SourcePositionTable* source_positions_ = nullptr;
+  LoopAssignmentAnalysis* loop_assignment_ = nullptr;
   TypeHintAnalysis* type_hint_analysis_ = nullptr;
-  SimplifiedOperatorBuilder* simplified_;
-  MachineOperatorBuilder* machine_;
-  CommonOperatorBuilder* common_;
-  JSOperatorBuilder* javascript_;
-  JSGraph* jsgraph_;
-  Schedule* schedule_;
+  SimplifiedOperatorBuilder* simplified_ = nullptr;
+  MachineOperatorBuilder* machine_ = nullptr;
+  CommonOperatorBuilder* common_ = nullptr;
+  JSOperatorBuilder* javascript_ = nullptr;
+  JSGraph* jsgraph_ = nullptr;
+  Schedule* schedule_ = nullptr;
 
   // All objects in the following group of fields are allocated in
   // instruction_zone_.  They are all set to nullptr when the instruction_zone_
@@ -331,15 +330,21 @@
   // destroyed.
   ZonePool::Scope instruction_zone_scope_;
   Zone* instruction_zone_;
-  InstructionSequence* sequence_;
-  Frame* frame_;
+  InstructionSequence* sequence_ = nullptr;
+  Frame* frame_ = nullptr;
 
   // All objects in the following group of fields are allocated in
   // register_allocation_zone_.  They are all set to nullptr when the zone is
   // destroyed.
   ZonePool::Scope register_allocation_zone_scope_;
   Zone* register_allocation_zone_;
-  RegisterAllocationData* register_allocation_data_;
+  RegisterAllocationData* register_allocation_data_ = nullptr;
+
+  // Basic block profiling support.
+  BasicBlockProfiler::Data* profiler_data_ = nullptr;
+
+  // Source position output for --trace-turbo.
+  std::string source_position_output_;
 
   int CalculateFixedFrameSize(CallDescriptor* descriptor) {
     if (descriptor->IsJSFunctionCall()) {
@@ -354,6 +359,38 @@
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
 
+class PipelineImpl final {
+ public:
+  explicit PipelineImpl(PipelineData* data) : data_(data) {}
+
+  // Helpers for executing pipeline phases.
+  template <typename Phase>
+  void Run();
+  template <typename Phase, typename Arg0>
+  void Run(Arg0 arg_0);
+  template <typename Phase, typename Arg0, typename Arg1>
+  void Run(Arg0 arg_0, Arg1 arg_1);
+
+  // Run the graph creation and initial optimization passes.
+  bool CreateGraph();
+
+  // Run the concurrent optimization passes.
+  bool OptimizeGraph(Linkage* linkage);
+
+  // Perform the actual code generation and return handle to a code object.
+  Handle<Code> GenerateCode(Linkage* linkage);
+
+  bool ScheduleAndSelectInstructions(Linkage* linkage);
+  void RunPrintAndVerify(const char* phase, bool untyped = false);
+  Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
+  void AllocateRegisters(const RegisterConfiguration* config,
+                         CallDescriptor* descriptor, bool run_verifier);
+
+  CompilationInfo* info() const;
+  Isolate* isolate() const;
+
+  PipelineData* const data_;
+};
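// Note: not part of the patch. PipelineImpl splits compilation into three
// stages so that a CompilationJob can run the optimization stage off the
// main thread. The driver pattern, taken verbatim from
// Pipeline::GenerateCodeForTesting further down in this file:
//
//   Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
//   if (!pipeline.CreateGraph()) return Handle<Code>::null();
//   if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
//   return pipeline.GenerateCode(&linkage);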
 
 namespace {
 
@@ -363,26 +400,30 @@
                       std::ios_base::app) {}
 };
 
+struct TurboJsonFile : public std::ofstream {
+  TurboJsonFile(CompilationInfo* info, std::ios_base::openmode mode)
+      : std::ofstream(GetVisualizerLogFileName(info, nullptr, "json").get(),
+                      mode) {}
+};
 
 void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
   if (FLAG_trace_turbo) {
-    FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
-    if (json_file != nullptr) {
-      OFStream json_of(json_file);
-      json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
-      std::stringstream schedule_stream;
-      schedule_stream << *schedule;
-      std::string schedule_string(schedule_stream.str());
-      for (const auto& c : schedule_string) {
-        json_of << AsEscapedUC16ForJSON(c);
-      }
-      json_of << "\"},\n";
-      fclose(json_file);
+    AllowHandleDereference allow_deref;
+    TurboJsonFile json_of(info, std::ios_base::app);
+    json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
+    std::stringstream schedule_stream;
+    schedule_stream << *schedule;
+    std::string schedule_string(schedule_stream.str());
+    for (const auto& c : schedule_string) {
+      json_of << AsEscapedUC16ForJSON(c);
     }
+    json_of << "\"},\n";
   }
-  if (!FLAG_trace_turbo_graph && !FLAG_trace_turbo_scheduler) return;
-  OFStream os(stdout);
-  os << "-- Schedule --------------------------------------\n" << *schedule;
+  if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
+    AllowHandleDereference allow_deref;
+    OFStream os(stdout);
+    os << "-- Schedule --------------------------------------\n" << *schedule;
+  }
 }
 
 
@@ -476,32 +517,199 @@
   ZonePool::Scope zone_scope_;
 };
 
+PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
+                                             ZonePool* zone_pool) {
+  PipelineStatistics* pipeline_statistics = nullptr;
+
+  if (FLAG_turbo_stats) {
+    pipeline_statistics = new PipelineStatistics(info, zone_pool);
+    pipeline_statistics->BeginPhaseKind("initializing");
+  }
+
+  if (FLAG_trace_turbo) {
+    TurboJsonFile json_of(info, std::ios_base::trunc);
+    Handle<Script> script = info->script();
+    base::SmartArrayPointer<char> function_name = info->GetDebugName();
+    int pos = info->shared_info()->start_position();
+    json_of << "{\"function\":\"" << function_name.get()
+            << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
+    if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+      DisallowHeapAllocation no_allocation;
+      int start = info->shared_info()->start_position();
+      int len = info->shared_info()->end_position() - start;
+      String::SubStringRange source(String::cast(script->source()), start, len);
+      for (const auto& c : source) {
+        json_of << AsEscapedUC16ForJSON(c);
+      }
+    }
+    json_of << "\",\n\"phases\":[";
+  }
+
+  return pipeline_statistics;
+}
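// Note: not part of the patch. CreatePipelineStatistics returns an owning
// raw pointer (nullptr unless --turbo-stats is set) and, independently,
// truncates and opens the --trace-turbo JSON file. Callers in this patch
// take ownership immediately:
//
//   base::SmartPointer<PipelineStatistics> pipeline_statistics(
//       CreatePipelineStatistics(info, &zone_pool));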
+
 }  // namespace
 
+class PipelineCompilationJob final : public CompilationJob {
+ public:
+  PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
+      // Note that the CompilationInfo is not initialized at the time we pass it
+      // to the CompilationJob constructor, but it is not dereferenced there.
+      : CompilationJob(&info_, "TurboFan"),
+        zone_(isolate->allocator()),
+        zone_pool_(isolate->allocator()),
+        parse_info_(&zone_, function),
+        info_(&parse_info_, function),
+        pipeline_statistics_(CreatePipelineStatistics(info(), &zone_pool_)),
+        data_(&zone_pool_, info(), pipeline_statistics_.get()),
+        pipeline_(&data_),
+        linkage_(nullptr) {}
+
+ protected:
+  Status CreateGraphImpl() final;
+  Status OptimizeGraphImpl() final;
+  Status GenerateCodeImpl() final;
+
+ private:
+  Zone zone_;
+  ZonePool zone_pool_;
+  ParseInfo parse_info_;
+  CompilationInfo info_;
+  base::SmartPointer<PipelineStatistics> pipeline_statistics_;
+  PipelineData data_;
+  PipelineImpl pipeline_;
+  Linkage* linkage_;
+};
+
+PipelineCompilationJob::Status PipelineCompilationJob::CreateGraphImpl() {
+  if (info()->shared_info()->asm_function()) {
+    if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
+    info()->MarkAsFunctionContextSpecializing();
+  } else {
+    if (!FLAG_always_opt) {
+      info()->MarkAsBailoutOnUninitialized();
+    }
+    if (FLAG_native_context_specialization) {
+      info()->MarkAsNativeContextSpecializing();
+    }
+  }
+  if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
+    info()->MarkAsDeoptimizationEnabled();
+  }
+  if (!info()->is_optimizing_from_bytecode()) {
+    if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
+  }
+
+  linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
+
+  if (!pipeline_.CreateGraph()) {
+    if (isolate()->has_pending_exception()) return FAILED;  // Stack overflowed.
+    return AbortOptimization(kGraphBuildingFailed);
+  }
+
+  return SUCCEEDED;
+}
+
+PipelineCompilationJob::Status PipelineCompilationJob::OptimizeGraphImpl() {
+  if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
+  return SUCCEEDED;
+}
+
+PipelineCompilationJob::Status PipelineCompilationJob::GenerateCodeImpl() {
+  Handle<Code> code = pipeline_.GenerateCode(linkage_);
+  if (code.is_null()) {
+    if (info()->bailout_reason() == kNoReason) {
+      return AbortOptimization(kCodeGenerationFailed);
+    }
+    return FAILED;
+  }
+  info()->dependencies()->Commit(code);
+  info()->SetCode(code);
+  if (info()->is_deoptimization_enabled()) {
+    info()->context()->native_context()->AddOptimizedCode(*code);
+    RegisterWeakObjectsInOptimizedCode(code);
+  }
+  return SUCCEEDED;
+}
+
+class PipelineWasmCompilationJob final : public CompilationJob {
+ public:
+  explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
+                                      CallDescriptor* descriptor,
+                                      SourcePositionTable* source_positions)
+      : CompilationJob(info, "TurboFan"),
+        zone_pool_(info->isolate()->allocator()),
+        data_(&zone_pool_, info, graph, source_positions),
+        pipeline_(&data_),
+        linkage_(descriptor) {}
+
+ protected:
+  Status CreateGraphImpl() final;
+  Status OptimizeGraphImpl() final;
+  Status GenerateCodeImpl() final;
+
+ private:
+  ZonePool zone_pool_;
+  PipelineData data_;
+  PipelineImpl pipeline_;
+  Linkage linkage_;
+};
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::CreateGraphImpl() {
+  return SUCCEEDED;
+}
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::OptimizeGraphImpl() {
+  if (FLAG_trace_turbo) {
+    TurboJsonFile json_of(info(), std::ios_base::trunc);
+    json_of << "{\"function\":\"" << info()->GetDebugName().get()
+            << "\", \"source\":\"\",\n\"phases\":[";
+  }
+
+  pipeline_.RunPrintAndVerify("Machine", true);
+
+  if (!pipeline_.ScheduleAndSelectInstructions(&linkage_)) return FAILED;
+  return SUCCEEDED;
+}
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::GenerateCodeImpl() {
+  pipeline_.GenerateCode(&linkage_);
+  return SUCCEEDED;
+}
 
 template <typename Phase>
-void Pipeline::Run() {
+void PipelineImpl::Run() {
   PipelineRunScope scope(this->data_, Phase::phase_name());
   Phase phase;
   phase.Run(this->data_, scope.zone());
 }
 
-
 template <typename Phase, typename Arg0>
-void Pipeline::Run(Arg0 arg_0) {
+void PipelineImpl::Run(Arg0 arg_0) {
   PipelineRunScope scope(this->data_, Phase::phase_name());
   Phase phase;
   phase.Run(this->data_, scope.zone(), arg_0);
 }
 
+template <typename Phase, typename Arg0, typename Arg1>
+void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
+  PipelineRunScope scope(this->data_, Phase::phase_name());
+  Phase phase;
+  phase.Run(this->data_, scope.zone(), arg_0, arg_1);
+}
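// Note: not part of the patch. The new two-argument Run overload forwards
// extra arguments to Phase::Run; this patch uses it in
// Pipeline::GenerateCodeForCodeStub to verify the machine graph:
//
//   pipeline.Run<VerifyGraphPhase>(false, true);  // untyped=false,
//                                                 // values_only=true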
 
 struct LoopAssignmentAnalysisPhase {
   static const char* phase_name() { return "loop assignment analysis"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
-    LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
-    data->set_loop_assignment(loop_assignment);
+    if (!data->info()->is_optimizing_from_bytecode()) {
+      AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
+      LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
+      data->set_loop_assignment(loop_assignment);
+    }
   }
 };
 
@@ -510,10 +718,12 @@
   static const char* phase_name() { return "type hint analysis"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    TypeHintAnalyzer analyzer(data->graph_zone());
-    Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
-    TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
-    data->set_type_hint_analysis(type_hint_analysis);
+    if (!data->info()->is_optimizing_from_bytecode()) {
+      TypeHintAnalyzer analyzer(data->graph_zone());
+      Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
+      TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
+      data->set_type_hint_analysis(type_hint_analysis);
+    }
   }
 };
 
@@ -525,7 +735,7 @@
     bool stack_check = !data->info()->IsStub();
     bool succeeded = false;
 
-    if (data->info()->shared_info()->HasBytecodeArray()) {
+    if (data->info()->is_optimizing_from_bytecode()) {
       BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
                                          data->jsgraph());
       succeeded = graph_builder.CreateGraph();
@@ -610,6 +820,31 @@
   }
 };
 
+#ifdef DEBUG
+
+struct UntyperPhase {
+  static const char* phase_name() { return "untyper"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    class RemoveTypeReducer final : public Reducer {
+     public:
+      Reduction Reduce(Node* node) final {
+        if (NodeProperties::IsTyped(node)) {
+          NodeProperties::RemoveType(node);
+          return Changed(node);
+        }
+        return NoChange();
+      }
+    };
+
+    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+    RemoveTypeReducer remove_type_reducer;
+    AddReducer(data, &graph_reducer, &remove_type_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+#endif  // DEBUG
 
 struct OsrDeconstructionPhase {
   static const char* phase_name() { return "OSR deconstruction"; }
@@ -629,7 +864,7 @@
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
     LoadElimination load_elimination(&graph_reducer, data->graph(),
-                                     data->common());
+                                     data->jsgraph()->simplified());
     JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
     MaybeHandle<LiteralsArray> literals_array =
         data->info()->is_native_context_specializing()
@@ -697,30 +932,28 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                          &escape_analysis, temp_zone);
-    escape_reducer.SetExistsVirtualAllocate(
-        escape_analysis.ExistsVirtualAllocate());
     AddReducer(data, &graph_reducer, &escape_reducer);
     graph_reducer.ReduceGraph();
     escape_reducer.VerifyReplacement();
   }
 };
 
-
-struct SimplifiedLoweringPhase {
-  static const char* phase_name() { return "simplified lowering"; }
+struct RepresentationSelectionPhase {
+  static const char* phase_name() { return "representation selection"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
                                 data->source_positions());
     lowering.LowerAllNodes();
+  }
+};
 
-    // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
-    if (lowering.abort_compilation_) {
-      data->set_compilation_failed();
-      return;
-    }
+struct EarlyOptimizationPhase {
+  static const char* phase_name() { return "early optimization"; }
 
+  void Run(PipelineData* data, Zone* temp_zone) {
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+    JSGenericLowering generic_lowering(data->jsgraph());
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
     SimplifiedOperatorReducer simple_reducer(data->jsgraph());
@@ -730,6 +963,7 @@
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &simple_reducer);
+    AddReducer(data, &graph_reducer, &generic_lowering);
     AddReducer(data, &graph_reducer, &value_numbering);
     AddReducer(data, &graph_reducer, &machine_reducer);
     AddReducer(data, &graph_reducer, &common_reducer);
@@ -737,7 +971,6 @@
   }
 };
 
-
 struct ControlFlowOptimizationPhase {
   static const char* phase_name() { return "control flow optimization"; }
 
@@ -748,31 +981,70 @@
   }
 };
 
+struct EffectControlLinearizationPhase {
+  static const char* phase_name() { return "effect linearization"; }
 
-struct ChangeLoweringPhase {
-  static const char* phase_name() { return "change lowering"; }
+  void Run(PipelineData* data, Zone* temp_zone) {
+    // The scheduler requires the graphs to be trimmed, so trim now.
+    // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
+    // graphs.
+    GraphTrimmer trimmer(temp_zone, data->graph());
+    NodeVector roots(temp_zone);
+    data->jsgraph()->GetCachedNodes(&roots);
+    trimmer.TrimGraph(roots.begin(), roots.end());
+
+    // Schedule the graph without node splitting so that we can
+    // fix the effect and control flow for nodes with low-level side
+    // effects (such as changing representation to tagged or
+    // 'floating' allocation regions).
+    Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
+                                                    Scheduler::kNoFlags);
+    if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
+    TraceSchedule(data->info(), schedule);
+
+    // Post-pass for wiring the control/effects
+    // - connect allocating representation changes into the control & effect
+    //   chains and lower them,
+    // - get rid of the region markers,
+    // - introduce effect phis and rewire effects to get SSA again.
+    EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone);
+    linearizer.Run();
+  }
+};
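// Note: not part of the patch. In the new PipelineImpl::OptimizeGraph stage
// (below), this phase runs first, before branch elimination, control flow
// optimization, and memory optimization:
//
//   Run<EffectControlLinearizationPhase>();
//   RunPrintAndVerify("Effect and control linearized", true);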
+
+struct MemoryOptimizationPhase {
+  static const char* phase_name() { return "memory optimization"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
+    optimizer.Optimize();
+  }
+};
+
+struct LateOptimizationPhase {
+  static const char* phase_name() { return "late optimization"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
-    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
     ValueNumberingReducer value_numbering(temp_zone);
-    ChangeLowering lowering(data->jsgraph());
     MachineOperatorReducer machine_reducer(data->jsgraph());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
+    SelectLowering select_lowering(data->jsgraph()->graph(),
+                                   data->jsgraph()->common());
+    TailCallOptimization tco(data->common(), data->graph());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
-    AddReducer(data, &graph_reducer, &simple_reducer);
     AddReducer(data, &graph_reducer, &value_numbering);
-    AddReducer(data, &graph_reducer, &lowering);
     AddReducer(data, &graph_reducer, &machine_reducer);
     AddReducer(data, &graph_reducer, &common_reducer);
+    AddReducer(data, &graph_reducer, &select_lowering);
+    AddReducer(data, &graph_reducer, &tco);
     graph_reducer.ReduceGraph();
   }
 };
 
-
 struct EarlyGraphTrimmingPhase {
   static const char* phase_name() { return "early graph trimming"; }
   void Run(PipelineData* data, Zone* temp_zone) {
@@ -810,30 +1082,6 @@
 };
 
 
-struct GenericLoweringPhase {
-  static const char* phase_name() { return "generic lowering"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
-    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                              data->common());
-    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
-                                         data->common(), data->machine());
-    JSGenericLowering generic_lowering(data->info()->is_typing_enabled(),
-                                       data->jsgraph());
-    SelectLowering select_lowering(data->jsgraph()->graph(),
-                                   data->jsgraph()->common());
-    TailCallOptimization tco(data->common(), data->graph());
-    AddReducer(data, &graph_reducer, &dead_code_elimination);
-    AddReducer(data, &graph_reducer, &common_reducer);
-    AddReducer(data, &graph_reducer, &generic_lowering);
-    AddReducer(data, &graph_reducer, &select_lowering);
-    AddReducer(data, &graph_reducer, &tco);
-    graph_reducer.ReduceGraph();
-  }
-};
-
-
 struct ComputeSchedulePhase {
   static const char* phase_name() { return "scheduling"; }
 
@@ -915,13 +1163,14 @@
   }
 };
 
-
 template <typename RegAllocator>
-struct AllocateDoubleRegistersPhase {
-  static const char* phase_name() { return "allocate double registers"; }
+struct AllocateFPRegistersPhase {
+  static const char* phase_name() {
+    return "allocate floating point registers";
+  }
 
   void Run(PipelineData* data, Zone* temp_zone) {
-    RegAllocator allocator(data->register_allocation_data(), DOUBLE_REGISTERS,
+    RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
                            temp_zone);
     allocator.AllocateRegisters();
   }
@@ -1049,15 +1298,14 @@
     Graph* graph = data->graph();
 
     {  // Print JSON.
-      FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
-      if (json_file == nullptr) return;
-      OFStream json_of(json_file);
+      AllowHandleDereference allow_deref;
+      TurboJsonFile json_of(info, std::ios_base::app);
       json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
               << AsJSON(*graph, data->source_positions()) << "},\n";
-      fclose(json_file);
     }
 
     if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
+      AllowHandleDereference allow_deref;
       OFStream os(stdout);
       os << "-- Graph after " << phase << " -- " << std::endl;
       os << AsRPO(*graph);
@@ -1069,22 +1317,14 @@
 struct VerifyGraphPhase {
   static const char* phase_name() { return nullptr; }
 
-  void Run(PipelineData* data, Zone* temp_zone, const bool untyped) {
-    Verifier::Run(data->graph(), FLAG_turbo_types && !untyped
-                                     ? Verifier::TYPED
-                                     : Verifier::UNTYPED);
+  void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
+           bool values_only = false) {
+    Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
+                  values_only ? Verifier::kValuesOnly : Verifier::kAll);
   }
 };
 
-
-void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
-  if (data_->pipeline_statistics() != nullptr) {
-    data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
-  }
-}
-
-
-void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
+void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
   if (FLAG_trace_turbo) {
     Run<PrintGraphPhase>(phase);
   }
@@ -1093,46 +1333,10 @@
   }
 }
 
+bool PipelineImpl::CreateGraph() {
+  PipelineData* data = this->data_;
 
-Handle<Code> Pipeline::GenerateCode() {
-  ZonePool zone_pool(isolate()->allocator());
-  base::SmartPointer<PipelineStatistics> pipeline_statistics;
-
-  if (FLAG_turbo_stats) {
-    pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
-    pipeline_statistics->BeginPhaseKind("initializing");
-  }
-
-  if (FLAG_trace_turbo) {
-    FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "w+");
-    if (json_file != nullptr) {
-      OFStream json_of(json_file);
-      Handle<Script> script = info()->script();
-      base::SmartArrayPointer<char> function_name = info()->GetDebugName();
-      int pos = info()->shared_info()->start_position();
-      json_of << "{\"function\":\"" << function_name.get()
-              << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
-      if (info()->has_literal() && !script->IsUndefined() &&
-          !script->source()->IsUndefined()) {
-        DisallowHeapAllocation no_allocation;
-        FunctionLiteral* function = info()->literal();
-        int start = function->start_position();
-        int len = function->end_position() - start;
-        String::SubStringRange source(String::cast(script->source()), start,
-                                      len);
-        for (const auto& c : source) {
-          json_of << AsEscapedUC16ForJSON(c);
-        }
-      }
-      json_of << "\",\n\"phases\":[";
-      fclose(json_file);
-    }
-  }
-
-  PipelineData data(&zone_pool, info(), pipeline_statistics.get());
-  this->data_ = &data;
-
-  BeginPhaseKind("graph creation");
+  data->BeginPhaseKind("graph creation");
 
   if (FLAG_trace_turbo) {
     OFStream os(stdout);
@@ -1143,18 +1347,19 @@
     tcf << AsC1VCompilation(info());
   }
 
-  data.source_positions()->AddDecorator();
+  data->source_positions()->AddDecorator();
 
   if (FLAG_loop_assignment_analysis) {
     Run<LoopAssignmentAnalysisPhase>();
   }
 
-  if (info()->is_typing_enabled()) {
-    Run<TypeHintAnalysisPhase>();
-  }
+  Run<TypeHintAnalysisPhase>();
 
   Run<GraphBuilderPhase>();
-  if (data.compilation_failed()) return Handle<Code>::null();
+  if (data->compilation_failed()) {
+    data->EndPhaseKind();
+    return false;
+  }
   RunPrintAndVerify("Initial untyped", true);
 
   // Perform OSR deconstruction.
@@ -1173,24 +1378,23 @@
 
   if (FLAG_print_turbo_replay) {
     // Print a replay of the initial graph.
-    GraphReplayPrinter::PrintReplay(data.graph());
+    GraphReplayPrinter::PrintReplay(data->graph());
   }
 
-  base::SmartPointer<Typer> typer;
-  if (info()->is_typing_enabled()) {
-    // Type the graph.
-    typer.Reset(new Typer(isolate(), data.graph(),
-                          info()->is_deoptimization_enabled()
-                              ? Typer::kDeoptimizationEnabled
-                              : Typer::kNoFlags,
-                          info()->dependencies()));
-    Run<TyperPhase>(typer.get());
+  // Run the type-sensitive lowerings and optimizations on the graph.
+  {
+    // Type the graph and keep the Typer running on newly created nodes within
+    // this scope; the Typer is automatically unlinked from the Graph once we
+    // leave this scope below.
+    Typer typer(isolate(), data->graph(), info()->is_deoptimization_enabled()
+                                              ? Typer::kDeoptimizationEnabled
+                                              : Typer::kNoFlags,
+                info()->dependencies());
+    Run<TyperPhase>(&typer);
     RunPrintAndVerify("Typed");
-  }
 
-  BeginPhaseKind("lowering");
+    data->BeginPhaseKind("lowering");
 
-  if (info()->is_typing_enabled()) {
     // Lower JSOperators where we can determine types.
     Run<TypedLoweringPhase>();
     RunPrintAndVerify("Lowered typed");
@@ -1205,55 +1409,79 @@
       RunPrintAndVerify("Escape Analysed");
     }
 
-    // Lower simplified operators and insert changes.
-    Run<SimplifiedLoweringPhase>();
-    RunPrintAndVerify("Lowered simplified");
+    // Select representations.
+    Run<RepresentationSelectionPhase>();
+    RunPrintAndVerify("Representations selected");
 
-    Run<BranchEliminationPhase>();
-    RunPrintAndVerify("Branch conditions eliminated");
-
-    // Optimize control flow.
-    if (FLAG_turbo_cf_optimization) {
-      Run<ControlFlowOptimizationPhase>();
-      RunPrintAndVerify("Control flow optimized");
-    }
-
-    // Lower changes that have been inserted before.
-    Run<ChangeLoweringPhase>();
-    // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
-    RunPrintAndVerify("Lowered changes", true);
+    // Run early optimization pass.
+    Run<EarlyOptimizationPhase>();
+    RunPrintAndVerify("Early optimized");
   }
 
-  // Lower any remaining generic JSOperators.
-  Run<GenericLoweringPhase>();
+#ifdef DEBUG
+  // From now on it is invalid to look at types on the nodes, because:
+  //
+  //  (a) The remaining passes (might) run concurrent to the main thread and
+  //      therefore must not access the Heap or the Isolate in an uncontrolled
+  //      way (as done by the type system), and
+  //  (b) the types on the nodes might not make sense after representation
+  //      selection due to the way we handle truncations; if we'd want to look
+  //      at types afterwards we'd essentially need to re-type (large portions
+  //      of) the graph.
+  //
+  // In order to catch bugs related to type access after this point, we remove
+  // the types from the nodes at this point (currently only in Debug builds).
+  Run<UntyperPhase>();
+  RunPrintAndVerify("Untyped", true);
+#endif
+
+  data->EndPhaseKind();
+
+  return true;
+}
+
+bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
+  PipelineData* data = this->data_;
+
+  data->BeginPhaseKind("block building");
+
+  Run<EffectControlLinearizationPhase>();
+  RunPrintAndVerify("Effect and control linearized", true);
+
+  Run<BranchEliminationPhase>();
+  RunPrintAndVerify("Branch conditions eliminated", true);
+
+  // Optimize control flow.
+  if (FLAG_turbo_cf_optimization) {
+    Run<ControlFlowOptimizationPhase>();
+    RunPrintAndVerify("Control flow optimized", true);
+  }
+
+  // Optimize memory access and allocation operations.
+  Run<MemoryOptimizationPhase>();
   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
-  RunPrintAndVerify("Lowered generic", true);
+  RunPrintAndVerify("Memory optimized", true);
+
+  // Lower changes that have been inserted before.
+  Run<LateOptimizationPhase>();
+  // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+  RunPrintAndVerify("Late optimized", true);
 
   Run<LateGraphTrimmingPhase>();
   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
   RunPrintAndVerify("Late trimmed", true);
 
-  BeginPhaseKind("block building");
+  data->source_positions()->RemoveDecorator();
 
-  data.source_positions()->RemoveDecorator();
-
-  // Kill the Typer and thereby uninstall the decorator (if any).
-  typer.Reset(nullptr);
-
-  // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
-  if (data.compilation_failed()) return Handle<Code>::null();
-
-  return ScheduleAndGenerateCode(
-      Linkage::ComputeIncoming(data.instruction_zone(), info()));
+  return ScheduleAndSelectInstructions(linkage);
 }
 
-
 Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
                                                CallDescriptor* call_descriptor,
                                                Graph* graph, Schedule* schedule,
                                                Code::Flags flags,
                                                const char* debug_name) {
-  CompilationInfo info(debug_name, isolate, graph->zone(), flags);
+  CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
 
   // Construct a pipeline for scheduling and code generation.
   ZonePool zone_pool(isolate->allocator());
@@ -1264,25 +1492,36 @@
     pipeline_statistics->BeginPhaseKind("stub codegen");
   }
 
-  Pipeline pipeline(&info);
-  pipeline.data_ = &data;
+  PipelineImpl pipeline(&data);
   DCHECK_NOT_NULL(data.schedule());
 
   if (FLAG_trace_turbo) {
-    FILE* json_file = OpenVisualizerLogFile(&info, nullptr, "json", "w+");
-    if (json_file != nullptr) {
-      OFStream json_of(json_file);
-      json_of << "{\"function\":\"" << info.GetDebugName().get()
-              << "\", \"source\":\"\",\n\"phases\":[";
-      fclose(json_file);
-    }
+    TurboJsonFile json_of(&info, std::ios_base::trunc);
+    json_of << "{\"function\":\"" << info.GetDebugName().get()
+            << "\", \"source\":\"\",\n\"phases\":[";
     pipeline.Run<PrintGraphPhase>("Machine");
   }
 
+  pipeline.Run<VerifyGraphPhase>(false, true);
   return pipeline.ScheduleAndGenerateCode(call_descriptor);
 }
 
+// static
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
+  ZonePool zone_pool(info->isolate()->allocator());
+  base::SmartPointer<PipelineStatistics> pipeline_statistics(
+      CreatePipelineStatistics(info, &zone_pool));
+  PipelineData data(&zone_pool, info, pipeline_statistics.get());
+  PipelineImpl pipeline(&data);
 
+  Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
+
+  if (!pipeline.CreateGraph()) return Handle<Code>::null();
+  if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
+  return pipeline.GenerateCode(&linkage);
+}
+
+// static
 Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
                                               Graph* graph,
                                               Schedule* schedule) {
@@ -1291,7 +1530,7 @@
   return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
 }
 
-
+// static
 Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
                                               CallDescriptor* call_descriptor,
                                               Graph* graph,
@@ -1305,33 +1544,47 @@
     pipeline_statistics->BeginPhaseKind("test codegen");
   }
 
-  Pipeline pipeline(info);
-  pipeline.data_ = &data;
-  if (data.schedule() == nullptr) {
-    // TODO(rossberg): Should this really be untyped?
-    pipeline.RunPrintAndVerify("Machine", true);
+  PipelineImpl pipeline(&data);
+
+  if (FLAG_trace_turbo) {
+    TurboJsonFile json_of(info, std::ios_base::trunc);
+    json_of << "{\"function\":\"" << info->GetDebugName().get()
+            << "\", \"source\":\"\",\n\"phases\":[";
   }
+  // TODO(rossberg): Should this really be untyped?
+  pipeline.RunPrintAndVerify("Machine", true);
 
   return pipeline.ScheduleAndGenerateCode(call_descriptor);
 }
 
+// static
+CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
+  return new PipelineCompilationJob(function->GetIsolate(), function);
+}
+
+// static
+CompilationJob* Pipeline::NewWasmCompilationJob(
+    CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
+    SourcePositionTable* source_positions) {
+  return new PipelineWasmCompilationJob(info, graph, descriptor,
+                                        source_positions);
+}
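// A PipelineCompilationJob (defined earlier in this file) is what ties these
// factories to the staged pipeline; roughly (a sketch, not the verbatim
// implementation):
//
//   CompilationJob::Status PipelineCompilationJob::CreateGraphImpl() {
//     return pipeline_.CreateGraph() ? SUCCEEDED : FAILED;
//   }
//   CompilationJob::Status PipelineCompilationJob::OptimizeGraphImpl() {
//     return pipeline_.OptimizeGraph(linkage_) ? SUCCEEDED : FAILED;
//   }
//   CompilationJob::Status PipelineCompilationJob::GenerateCodeImpl() {
//     Handle<Code> code = pipeline_.GenerateCode(linkage_);
//     if (code.is_null()) return FAILED;
//     info()->SetCode(code);
//     return SUCCEEDED;
//   }
//
// Splitting the job this way lets the optimize stage run off the main thread
// while graph building and final code generation stay on it.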
 
 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
                                            InstructionSequence* sequence,
                                            bool run_verifier) {
-  CompilationInfo info("testing", sequence->isolate(), sequence->zone());
+  CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
+                       sequence->zone());
   ZonePool zone_pool(sequence->isolate()->allocator());
   PipelineData data(&zone_pool, &info, sequence);
-  Pipeline pipeline(&info);
-  pipeline.data_ = &data;
+  PipelineImpl pipeline(&data);
   pipeline.data_->InitializeFrameData(nullptr);
   pipeline.AllocateRegisters(config, nullptr, run_verifier);
   return !data.compilation_failed();
 }
 
-
-Handle<Code> Pipeline::ScheduleAndGenerateCode(
-    CallDescriptor* call_descriptor) {
+bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage) {
+  CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
   PipelineData* data = this->data_;
 
   DCHECK_NOT_NULL(data->graph());
@@ -1339,34 +1592,34 @@
   if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
   TraceSchedule(data->info(), data->schedule());
 
-  BasicBlockProfiler::Data* profiler_data = nullptr;
   if (FLAG_turbo_profiling) {
-    profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
-                                                       data->schedule());
+    data->set_profiler_data(BasicBlockInstrumentor::Instrument(
+        info(), data->graph(), data->schedule()));
   }
 
   data->InitializeInstructionSequence(call_descriptor);
 
   data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
-  Linkage linkage(call_descriptor);
-  Run<InstructionSelectionPhase>(&linkage);
+  Run<InstructionSelectionPhase>(linkage);
 
   if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
+    AllowHandleDereference allow_deref;
     TurboCfgFile tcf(isolate());
     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
                  data->sequence());
   }
 
-  std::ostringstream source_position_output;
   if (FLAG_trace_turbo) {
+    std::ostringstream source_position_output;
     // Output source position information before the graph is deleted.
     data_->source_positions()->Print(source_position_output);
+    data_->set_source_position_output(source_position_output.str());
   }
 
   data->DeleteGraphZone();
 
-  BeginPhaseKind("register allocation");
+  data->BeginPhaseKind("register allocation");
 
   bool run_verifier = FLAG_turbo_verify_allocation;
 
@@ -1377,10 +1630,10 @@
   Run<FrameElisionPhase>();
   if (data->compilation_failed()) {
     info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
-    return Handle<Code>();
+    data->EndPhaseKind();
+    return false;
   }
 
-  BeginPhaseKind("code generation");
   // TODO(mtrofin): move this off to the register allocator.
   bool generate_frame_at_start =
       data_->sequence()->instruction_blocks().front()->must_construct_frame();
@@ -1389,15 +1642,25 @@
     Run<JumpThreadingPhase>(generate_frame_at_start);
   }
 
+  data->EndPhaseKind();
+
+  return true;
+}
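// ScheduleAndSelectInstructions now hands its results to GenerateCode through
// PipelineData rather than through locals, since the two can run as separate
// job stages. The accessors used above suggest roughly this state on
// PipelineData (assumed; the class itself is not shown in this hunk):
//
//   BasicBlockProfiler::Data* profiler_data_ = nullptr;
//   std::string source_position_output_;
//
//   void set_profiler_data(BasicBlockProfiler::Data* d) { profiler_data_ = d; }
//   BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
//   void set_source_position_output(const std::string& s) {
//     source_position_output_ = s;
//   }
//   const std::string& source_position_output() const {
//     return source_position_output_;
//   }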
+
+Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
+  PipelineData* data = this->data_;
+
+  data->BeginPhaseKind("code generation");
+
   // Generate final machine code.
-  Run<GenerateCodePhase>(&linkage);
+  Run<GenerateCodePhase>(linkage);
 
   Handle<Code> code = data->code();
-  if (profiler_data != nullptr) {
+  if (data->profiler_data()) {
 #if ENABLE_DISASSEMBLER
     std::ostringstream os;
     code->Disassemble(nullptr, os);
-    profiler_data->SetCode(&os);
+    data->profiler_data()->SetCode(&os);
 #endif
   }
 
@@ -1405,25 +1668,21 @@
   v8::internal::CodeGenerator::PrintCode(code, info());
 
   if (FLAG_trace_turbo) {
-    FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "a+");
-    if (json_file != nullptr) {
-      OFStream json_of(json_file);
-      json_of
-          << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+    TurboJsonFile json_of(info(), std::ios_base::app);
+    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
 #if ENABLE_DISASSEMBLER
-      std::stringstream disassembly_stream;
-      code->Disassemble(nullptr, disassembly_stream);
-      std::string disassembly_string(disassembly_stream.str());
-      for (const auto& c : disassembly_string) {
-        json_of << AsEscapedUC16ForJSON(c);
-      }
-#endif  // ENABLE_DISASSEMBLER
-      json_of << "\"}\n],\n";
-      json_of << "\"nodePositions\":";
-      json_of << source_position_output.str();
-      json_of << "}";
-      fclose(json_file);
+    std::stringstream disassembly_stream;
+    code->Disassemble(nullptr, disassembly_stream);
+    std::string disassembly_string(disassembly_stream.str());
+    for (const auto& c : disassembly_string) {
+      json_of << AsEscapedUC16ForJSON(c);
     }
+#endif  // ENABLE_DISASSEMBLER
+    json_of << "\"}\n],\n";
+    json_of << "\"nodePositions\":";
+    json_of << data->source_position_output();
+    json_of << "}";
+
     OFStream os(stdout);
     os << "---------------------------------------------------\n"
        << "Finished compiling method " << info()->GetDebugName().get()
@@ -1433,12 +1692,21 @@
   return code;
 }
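// TurboJsonFile (an assumption about its shape; it is not defined in this
// patch) behaves like an std::ofstream opened on the per-function visualizer
// log, so the manual OpenVisualizerLogFile()/fclose() pairing is replaced by
// RAII:
//
//   {
//     TurboJsonFile json_of(info(), std::ios_base::app);  // open for append
//     json_of << "...";
//   }  // stream flushed and closed on scope exit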
 
+Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
+    CallDescriptor* call_descriptor) {
+  Linkage linkage(call_descriptor);
 
-void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
-                                 CallDescriptor* descriptor,
-                                 bool run_verifier) {
+  // Schedule the graph, perform instruction selection and register allocation.
+  if (!ScheduleAndSelectInstructions(&linkage)) return Handle<Code>();
+
+  // Generate the final machine code.
+  return GenerateCode(&linkage);
+}
+
+void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
+                                     CallDescriptor* descriptor,
+                                     bool run_verifier) {
   PipelineData* data = this->data_;
-
   // Don't track usage for this zone in compiler stats.
   base::SmartPointer<Zone> verifier_zone;
   RegisterAllocatorVerifier* verifier = nullptr;
@@ -1448,14 +1716,13 @@
         verifier_zone.get(), config, data->sequence());
   }
 
-  base::SmartArrayPointer<char> debug_name;
 #ifdef DEBUG
-  debug_name = info()->GetDebugName();
   data_->sequence()->ValidateEdgeSplitForm();
+  data_->sequence()->ValidateDeferredBlockEntryPaths();
   data_->sequence()->ValidateDeferredBlockExitPaths();
 #endif
 
-  data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
+  data->InitializeRegisterAllocationData(config, descriptor);
   if (info()->is_osr()) {
     OsrHelper osr_helper(info());
     osr_helper.SetupFrame(data->frame());
@@ -1465,10 +1732,10 @@
   Run<ResolvePhisPhase>();
   Run<BuildLiveRangesPhase>();
   if (FLAG_trace_turbo_graph) {
+    AllowHandleDereference allow_deref;
     OFStream os(stdout);
-    PrintableInstructionSequence printable = {config, data->sequence()};
     os << "----- Instruction sequence before register allocation -----\n"
-       << printable;
+       << PrintableInstructionSequence({config, data->sequence()});
   }
   if (verifier != nullptr) {
     CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
@@ -1482,10 +1749,10 @@
 
   if (FLAG_turbo_greedy_regalloc) {
     Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
-    Run<AllocateDoubleRegistersPhase<GreedyAllocator>>();
+    Run<AllocateFPRegistersPhase<GreedyAllocator>>();
   } else {
     Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
-    Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
+    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
   }
 
   if (FLAG_turbo_preprocess_ranges) {
@@ -1505,10 +1772,10 @@
   Run<LocateSpillSlotsPhase>();
 
   if (FLAG_trace_turbo_graph) {
+    AllowHandleDereference allow_deref;
     OFStream os(stdout);
-    PrintableInstructionSequence printable = {config, data->sequence()};
     os << "----- Instruction sequence after register allocation -----\n"
-       << printable;
+       << PrintableInstructionSequence({config, data->sequence()});
   }
 
   if (verifier != nullptr) {
@@ -1525,7 +1792,9 @@
   data->DeleteRegisterAllocationZone();
 }
 
-Isolate* Pipeline::isolate() const { return info()->isolate(); }
+CompilationInfo* PipelineImpl::info() const { return data_->info(); }
+
+Isolate* PipelineImpl::isolate() const { return info()->isolate(); }
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index edb8191..64befbf 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -13,6 +13,7 @@
 namespace internal {
 
 class CompilationInfo;
+class CompilationJob;
 class RegisterConfiguration;
 
 namespace compiler {
@@ -20,16 +21,18 @@
 class CallDescriptor;
 class Graph;
 class InstructionSequence;
-class Linkage;
-class PipelineData;
 class Schedule;
+class SourcePositionTable;
 
-class Pipeline {
+class Pipeline : public AllStatic {
  public:
-  explicit Pipeline(CompilationInfo* info) : info_(info) {}
+  // Returns a new compilation job for the given function.
+  static CompilationJob* NewCompilationJob(Handle<JSFunction> function);
 
-  // Run the entire pipeline and generate a handle to a code object.
-  Handle<Code> GenerateCode();
+  // Returns a new compilation job for the WebAssembly compilation info.
+  static CompilationJob* NewWasmCompilationJob(
+      CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
+      SourcePositionTable* source_positions);
 
   // Run the pipeline on a machine graph and generate code. The {schedule} must
   // be valid, hence the given {graph} does not need to be schedulable.
@@ -39,6 +42,10 @@
                                               Code::Flags flags,
                                               const char* debug_name);
 
+  // Run the entire pipeline and generate a handle to a code object suitable for
+  // testing.
+  static Handle<Code> GenerateCodeForTesting(CompilationInfo* info);
+
   // Run the pipeline on a machine graph and generate code. If {schedule} is
   // {nullptr}, then compute a new schedule for code generation.
   static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
@@ -58,27 +65,7 @@
                                              Schedule* schedule = nullptr);
 
  private:
-  // Helpers for executing pipeline phases.
-  template <typename Phase>
-  void Run();
-  template <typename Phase, typename Arg0>
-  void Run(Arg0 arg_0);
-  template <typename Phase, typename Arg0, typename Arg1>
-  void Run(Arg0 arg_0, Arg1 arg_1);
-
-  void BeginPhaseKind(const char* phase_kind);
-  void RunPrintAndVerify(const char* phase, bool untyped = false);
-  Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
-  void AllocateRegisters(const RegisterConfiguration* config,
-                         CallDescriptor* descriptor, bool run_verifier);
-
-  CompilationInfo* info() const { return info_; }
-  Isolate* isolate() const;
-
-  CompilationInfo* const info_;
-  PipelineData* data_;
-
-  DISALLOW_COPY_AND_ASSIGN(Pipeline);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 6f1e588..8a0c585 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -103,7 +103,7 @@
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
   }
 
@@ -175,7 +175,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                        Register value, Register scratch0, Register scratch1,
@@ -259,15 +260,10 @@
 #if V8_TARGET_ARCH_PPC64
         case kPPC_Add:
         case kPPC_Sub:
-          return lt;
 #endif
         case kPPC_AddWithOverflow32:
         case kPPC_SubWithOverflow32:
-#if V8_TARGET_ARCH_PPC64
-          return ne;
-#else
           return lt;
-#endif
         default:
           break;
       }
@@ -277,15 +273,10 @@
 #if V8_TARGET_ARCH_PPC64
         case kPPC_Add:
         case kPPC_Sub:
-          return ge;
 #endif
         case kPPC_AddWithOverflow32:
         case kPPC_SubWithOverflow32:
-#if V8_TARGET_ARCH_PPC64
-          return eq;
-#else
           return ge;
-#endif
         default:
           break;
       }
@@ -378,17 +369,16 @@
 
 
 #if V8_TARGET_ARCH_PPC64
-#define ASSEMBLE_ADD_WITH_OVERFLOW32()           \
-  do {                                           \
-    ASSEMBLE_BINOP(add, addi);                   \
-    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+#define ASSEMBLE_ADD_WITH_OVERFLOW32()         \
+  do {                                         \
+    ASSEMBLE_ADD_WITH_OVERFLOW();              \
+    __ extsw(kScratchReg, kScratchReg, SetRC); \
   } while (0)
 
-
-#define ASSEMBLE_SUB_WITH_OVERFLOW32()           \
-  do {                                           \
-    ASSEMBLE_BINOP(sub, subi);                   \
-    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+#define ASSEMBLE_SUB_WITH_OVERFLOW32()         \
+  do {                                         \
+    ASSEMBLE_SUB_WITH_OVERFLOW();              \
+    __ extsw(kScratchReg, kScratchReg, SetRC); \
   } while (0)
 #else
 #define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
@@ -536,8 +526,13 @@
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
   } while (0)
 
-
+#if V8_TARGET_ARCH_PPC64
 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))
+#else
+#define CleanUInt32(x)
+#endif
+
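// CleanUInt32 compensates for 32-bit offsets arriving sign- or
// garbage-extended in a 64-bit register: ClearLeftImm(x, x, Operand(32))
// emits a rldicl (clrldi) that keeps the low 32 bits and zeroes the rest.
// For example, a checked load whose offset register holds
// 0xffffffff80000004 is normalized to 0x0000000080000004 before the indexed
// access, instead of sign-extending the whole register with extsw as before.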
 #define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width)  \
   do {                                                             \
     DoubleRegister result = i.OutputDoubleRegister();              \
@@ -546,7 +541,6 @@
     MemOperand operand = i.MemoryOperand(&mode, index);            \
     DCHECK_EQ(kMode_MRR, mode);                                    \
     Register offset = operand.rb();                                \
-    __ extsw(offset, offset);                                      \
     if (HasRegisterInput(instr, 2)) {                              \
       __ cmplw(offset, i.InputRegister(2));                        \
     } else {                                                       \
@@ -557,14 +551,13 @@
     if (mode == kMode_MRI) {                                       \
       __ asm_instr(result, operand);                               \
     } else {                                                       \
+      CleanUInt32(offset);                                         \
       __ asm_instrx(result, operand);                              \
     }                                                              \
     __ bind(ool->exit());                                          \
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                           \
   } while (0)
 
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
   do {                                                       \
     Register result = i.OutputRegister();                    \
@@ -573,7 +566,6 @@
     MemOperand operand = i.MemoryOperand(&mode, index);      \
     DCHECK_EQ(kMode_MRR, mode);                              \
     Register offset = operand.rb();                          \
-    __ extsw(offset, offset);                                \
     if (HasRegisterInput(instr, 2)) {                        \
       __ cmplw(offset, i.InputRegister(2));                  \
     } else {                                                 \
@@ -584,14 +576,13 @@
     if (mode == kMode_MRI) {                                 \
       __ asm_instr(result, operand);                         \
     } else {                                                 \
+      CleanUInt32(offset);                                   \
       __ asm_instrx(result, operand);                        \
     }                                                        \
     __ bind(ool->exit());                                    \
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
   } while (0)
 
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
   do {                                                  \
     Label done;                                         \
@@ -600,7 +591,6 @@
     MemOperand operand = i.MemoryOperand(&mode, index); \
     DCHECK_EQ(kMode_MRR, mode);                         \
     Register offset = operand.rb();                     \
-    __ extsw(offset, offset);                           \
     if (HasRegisterInput(instr, 2)) {                   \
       __ cmplw(offset, i.InputRegister(2));             \
     } else {                                            \
@@ -612,14 +602,13 @@
     if (mode == kMode_MRI) {                            \
       __ stfs(kScratchDoubleReg, operand);              \
     } else {                                            \
+      CleanUInt32(offset);                              \
       __ stfsx(kScratchDoubleReg, operand);             \
     }                                                   \
     __ bind(&done);                                     \
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
   } while (0)
 
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
   do {                                                  \
     Label done;                                         \
@@ -628,7 +617,6 @@
     MemOperand operand = i.MemoryOperand(&mode, index); \
     DCHECK_EQ(kMode_MRR, mode);                         \
     Register offset = operand.rb();                     \
-    __ extsw(offset, offset);                           \
     if (HasRegisterInput(instr, 2)) {                   \
       __ cmplw(offset, i.InputRegister(2));             \
     } else {                                            \
@@ -639,14 +627,13 @@
     if (mode == kMode_MRI) {                            \
       __ stfd(value, operand);                          \
     } else {                                            \
+      CleanUInt32(offset);                              \
       __ stfdx(value, operand);                         \
     }                                                   \
     __ bind(&done);                                     \
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
   } while (0)
 
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
   do {                                                        \
     Label done;                                               \
@@ -655,7 +642,6 @@
     MemOperand operand = i.MemoryOperand(&mode, index);       \
     DCHECK_EQ(kMode_MRR, mode);                               \
     Register offset = operand.rb();                           \
-    __ extsw(offset, offset);                                 \
     if (HasRegisterInput(instr, 2)) {                         \
       __ cmplw(offset, i.InputRegister(2));                   \
     } else {                                                  \
@@ -666,18 +652,49 @@
     if (mode == kMode_MRI) {                                  \
       __ asm_instr(value, operand);                           \
     } else {                                                  \
+      CleanUInt32(offset);                                    \
       __ asm_instrx(value, operand);                          \
     }                                                         \
     __ bind(&done);                                           \
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
   } while (0)
 
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx)   \
+  do {                                                        \
+    Label done;                                               \
+    Register result = i.OutputRegister();                     \
+    AddressingMode mode = kMode_None;                         \
+    MemOperand operand = i.MemoryOperand(&mode);              \
+    __ sync();                                                \
+    if (mode == kMode_MRI) {                                  \
+      __ asm_instr(result, operand);                          \
+    } else {                                                  \
+      __ asm_instrx(result, operand);                         \
+    }                                                         \
+    __ cmp(result, result);                                   \
+    __ bne(&done);                                            \
+    __ bind(&done);                                           \
+    __ isync();                                               \
+  } while (0)
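// The sequence above is the Power acquire idiom: sync orders the load against
// earlier accesses, and the never-taken conditional branch over the following
// isync (cmp reg,reg is always equal) makes the loaded value a control
// dependency of the isync, so later accesses cannot be satisfied early.
// Schematically:
//
//   sync
//   l<size>  result, addr
//   cmp      result, result   ; always EQ
//   bne      .next            ; never taken, but depends on the load
// .next:
//   isync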
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx)  \
+  do {                                                        \
+    size_t index = 0;                                         \
+    AddressingMode mode = kMode_None;                         \
+    MemOperand operand = i.MemoryOperand(&mode, &index);      \
+    Register value = i.InputRegister(index);                  \
+    __ sync();                                                \
+    if (mode == kMode_MRI) {                                  \
+      __ asm_instr(value, operand);                           \
+    } else {                                                  \
+      __ asm_instrx(value, operand);                          \
+    }                                                         \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
+  } while (0)
+
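// For the store side only a leading sync is needed: it orders the store after
// all prior accesses, which is the leading-sync mapping for a sequentially
// consistent store on Power. Schematically:
//
//   sync
//   st<size> value, addr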
 void CodeGenerator::AssembleDeconstructFrame() {
   __ LeaveFrame(StackFrame::MANUAL);
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {}
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -725,7 +742,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   PPCOperandConverter i(this, instr);
   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
 
@@ -771,6 +789,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!instr->InputAt(0)->IsImmediate());
+      __ Jump(i.InputRegister(0));
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
           masm());
@@ -858,7 +884,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -1281,7 +1309,7 @@
       break;
 #endif
     case kPPC_Push:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
       } else {
@@ -1292,21 +1320,22 @@
       break;
     case kPPC_PushFrame: {
       int num_slots = i.InputInt32(1);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
-        __ stfdu(i.InputDoubleRegister(0),
-                 MemOperand(sp, -num_slots * kPointerSize));
+      if (instr->InputAt(0)->IsFPRegister()) {
+        __ StoreDoubleU(i.InputDoubleRegister(0),
+                        MemOperand(sp, -num_slots * kPointerSize), r0);
       } else {
         __ StorePU(i.InputRegister(0),
-                   MemOperand(sp, -num_slots * kPointerSize));
+                   MemOperand(sp, -num_slots * kPointerSize), r0);
       }
       break;
     }
     case kPPC_StoreToStackSlot: {
       int slot = i.InputInt32(1);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
-        __ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
+      if (instr->InputAt(0)->IsFPRegister()) {
+        __ StoreDouble(i.InputDoubleRegister(0),
+                       MemOperand(sp, slot * kPointerSize), r0);
       } else {
-        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
       }
       break;
     }
@@ -1492,6 +1521,9 @@
     case kPPC_LoadWordS16:
       ASSEMBLE_LOAD_INTEGER(lha, lhax);
       break;
+    case kPPC_LoadWordU32:
+      ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+      break;
     case kPPC_LoadWordS32:
       ASSEMBLE_LOAD_INTEGER(lwa, lwax);
       break;
@@ -1540,7 +1572,7 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
       break;
     case kCheckedLoadWord32:
-      ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
       break;
     case kCheckedLoadWord64:
 #if V8_TARGET_ARCH_PPC64
@@ -1577,10 +1609,38 @@
     case kCheckedStoreFloat64:
       ASSEMBLE_CHECKED_STORE_DOUBLE();
       break;
+
+    case kAtomicLoadInt8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
+      __ extsb(i.OutputRegister(), i.OutputRegister());
+      break;
+    case kAtomicLoadUint8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
+      break;
+    case kAtomicLoadInt16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
+      break;
+    case kAtomicLoadUint16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
+      break;
+    case kAtomicLoadWord32:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
+      break;
+
+    case kAtomicStoreWord8:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
+      break;
+    case kAtomicStoreWord16:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
+      break;
+    case kAtomicStoreWord32:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
+      break;
     default:
       UNREACHABLE();
       break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
 
 
@@ -1676,7 +1736,7 @@
   PPCOperandConverter i(this, instr);
   Register input = i.InputRegister(0);
   for (size_t index = 2; index < instr->InputCount(); index += 2) {
-    __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
+    __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0);
     __ beq(GetLabel(i.InputRpo(index + 1)));
   }
   AssembleArchJump(i.InputRpo(1));
@@ -1700,19 +1760,45 @@
   __ Jump(kScratchReg);
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
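// AssembleDeoptimizerCall now reports failure instead of emitting a call to a
// null entry. CodeGenResult is presumably the two-state status seen above:
//
//   enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };
//
// AssembleArchInstruction propagates it, so running out of deoptimization
// entries aborts the compilation cleanly rather than crashing at runtime.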
 
+void CodeGenerator::FinishFrame(Frame* frame) {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
 
-void CodeGenerator::AssemblePrologue() {
+  // Save callee-saved Double registers.
+  if (double_saves != 0) {
+    frame->AlignSavedCalleeRegisterSlots();
+    DCHECK(kNumCalleeSavedDoubles ==
+           base::bits::CountPopulation32(double_saves));
+    frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+                                             (kDoubleSize / kPointerSize));
+  }
+  // Save callee-saved registers.
+  const RegList saves =
+      FLAG_enable_embedded_constant_pool
+          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+          : descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    // register save area does not include the fp or constant pool pointer.
+    const int num_saves =
+        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+    DCHECK(num_saves == base::bits::CountPopulation32(saves));
+    frame->AllocateSavedCalleeRegisterSlots(num_saves);
+  }
+}
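// Frame layout is now computed in FinishFrame, before any code is emitted,
// while AssembleConstructFrame (below) only materializes it: the callee-saved
// double and GP slots are reserved here via AllocateSavedCalleeRegisterSlots,
// and the matching MultiPushDoubles/MultiPush stay in the prologue. The
// intended call order is presumably:
//
//   FinishFrame(frame());      // sizes the frame: saves, spill slots
//   ...
//   AssembleConstructFrame();  // emits the pushes and the sp adjustment
//
// Previously both happened while emitting the prologue, so the frame size was
// not final until codegen reached it.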
+
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsCFunctionCall()) {
@@ -1736,7 +1822,7 @@
     }
   }
 
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1747,15 +1833,12 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
   const RegList double_saves = descriptor->CalleeSavedFPRegisters();
-  if (double_saves != 0) {
-    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
-  }
-  if (stack_shrink_slots > 0) {
-    __ Add(sp, sp, -stack_shrink_slots * kPointerSize, r0);
+  if (shrink_slots > 0) {
+    __ Add(sp, sp, -shrink_slots * kPointerSize, r0);
   }
 
   // Save callee-saved Double registers.
@@ -1763,8 +1846,6 @@
     __ MultiPushDoubles(double_saves);
     DCHECK(kNumCalleeSavedDoubles ==
            base::bits::CountPopulation32(double_saves));
-    frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
-                                              (kDoubleSize / kPointerSize));
   }
 
   // Save callee-saved registers.
@@ -1775,10 +1856,6 @@
   if (saves != 0) {
     __ MultiPush(saves);
     // register save area does not include the fp or constant pool pointer.
-    const int num_saves =
-        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
-    DCHECK(num_saves == base::bits::CountPopulation32(saves));
-    frame()->AllocateSavedCalleeRegisterSlots(num_saves);
   }
 }
 
@@ -1848,10 +1925,28 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          __ mov(dst, Operand(src.ToInt32()));
+#if V8_TARGET_ARCH_PPC64
+          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#else
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#endif
+            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+          } else {
+            __ mov(dst, Operand(src.ToInt32()));
+          }
           break;
         case Constant::kInt64:
-          __ mov(dst, Operand(src.ToInt64()));
+#if V8_TARGET_ARCH_PPC64
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
+          } else {
+            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+#endif
+            __ mov(dst, Operand(src.ToInt64()));
+#if V8_TARGET_ARCH_PPC64
+          }
+#endif
           break;
         case Constant::kFloat32:
           __ Move(dst,
@@ -1885,29 +1980,29 @@
         __ StoreP(dst, g.ToMemOperand(destination), r0);
       }
     } else {
-      DoubleRegister dst = destination->IsDoubleRegister()
+      DoubleRegister dst = destination->IsFPRegister()
                                ? g.ToDoubleRegister(destination)
                                : kScratchDoubleReg;
       double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                         : src.ToFloat64();
       __ LoadDoubleLiteral(dst, value, kScratchReg);
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         __ StoreDouble(dst, g.ToMemOperand(destination), r0);
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     DoubleRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       DoubleRegister dst = g.ToDoubleRegister(destination);
       __ Move(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       __ StoreDouble(src, g.ToMemOperand(destination), r0);
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
     } else {
       DoubleRegister temp = kScratchDoubleReg;
@@ -1942,7 +2037,7 @@
       __ StoreP(temp, dst);
     }
 #if V8_TARGET_ARCH_PPC64
-  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
 #else
   } else if (source->IsStackSlot()) {
     DCHECK(destination->IsStackSlot());
@@ -1955,24 +2050,24 @@
     __ LoadP(temp_1, dst);
     __ StoreP(temp_0, dst);
     __ StoreP(temp_1, src);
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     DoubleRegister temp = kScratchDoubleReg;
     DoubleRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       DoubleRegister dst = g.ToDoubleRegister(destination);
       __ fmr(temp, src);
       __ fmr(src, dst);
       __ fmr(dst, temp);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
       __ fmr(temp, src);
       __ lfd(src, dst);
       __ stfd(temp, dst);
     }
 #if !V8_TARGET_ARCH_PPC64
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPStackSlot());
     DoubleRegister temp_0 = kScratchDoubleReg;
     DoubleRegister temp_1 = d0;
     MemOperand src = g.ToMemOperand(source);
@@ -1996,11 +2091,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() {
-  // We do not insert nops for inlined Smi code.
-}
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 66c2e99..23cd235 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -112,6 +112,7 @@
   V(PPC_LoadWordS16)               \
   V(PPC_LoadWordU16)               \
   V(PPC_LoadWordS32)               \
+  V(PPC_LoadWordU32)               \
   V(PPC_LoadWord64)                \
   V(PPC_LoadFloat32)               \
   V(PPC_LoadDouble)                \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index e7d7719..1259a87 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -113,6 +113,7 @@
     case kPPC_LoadWordS16:
     case kPPC_LoadWordU16:
     case kPPC_LoadWordS32:
+    case kPPC_LoadWordU32:
     case kPPC_LoadWord64:
     case kPPC_LoadFloat32:
     case kPPC_LoadDouble:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index 5abb5f1..b8ca3ba 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -190,11 +190,7 @@
     case MachineRepresentation::kTagged:  // Fall through.
 #endif
     case MachineRepresentation::kWord32:
-      opcode = kPPC_LoadWordS32;
-#if V8_TARGET_ARCH_PPC64
-      // TODO(mbrandy): this applies to signed loads only (lwa)
-      mode = kInt16Imm_4ByteAligned;
-#endif
+      opcode = kPPC_LoadWordU32;
       break;
 #if V8_TARGET_ARCH_PPC64
     case MachineRepresentation::kTagged:  // Fall through.
@@ -1137,15 +1133,12 @@
   VisitRR(this, kPPC_DoubleToFloat32, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, kArchTruncateDoubleToI, node);
+}
 
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, kArchTruncateDoubleToI, node);
-    case TruncationMode::kRoundToZero:
-      return VisitRR(this, kPPC_DoubleToInt32, node);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRR(this, kPPC_DoubleToInt32, node);
 }
 
 
@@ -1233,6 +1226,10 @@
   VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
+}
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   // TODO(mbrandy): detect multiply-subtract
@@ -1259,6 +1256,9 @@
   VisitRRR(this, kPPC_SubDouble, node);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitRRR(this, kPPC_SubDouble, node);
+}
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
@@ -1921,6 +1921,60 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  PPCOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode | AddressingModeField::encode(kMode_MRR),
+       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  PPCOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kAtomicStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kAtomicStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicStoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  inputs[input_count++] = g.UseUniqueRegister(index);
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  Emit(opcode | AddressingModeField::encode(kMode_MRR),
+       0, nullptr, input_count, inputs);
+}
 
 // static
 MachineOperatorBuilder::Flags
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 728d79a..9407da6 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -35,6 +35,12 @@
   graph->SetEnd(graph->NewNode(common_.End(0)));
 }
 
+Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
+                                                     RelocInfo::Mode rmode) {
+  return kPointerSize == 8
+             ? RelocatableInt64Constant(value, rmode)
+             : RelocatableInt32Constant(static_cast<int>(value), rmode);
+}
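// Usage sketch (hypothetical wasm-compiler call site): the returned constant
// carries a RelocInfo mode, so the embedded address can be patched when the
// wasm heap moves or grows:
//
//   Node* mem_start = m.RelocatableIntPtrConstant(
//       reinterpret_cast<intptr_t>(memory_start),
//       RelocInfo::WASM_MEMORY_REFERENCE);
//   Node* mem_size = m.RelocatableIntPtrConstant(
//       static_cast<intptr_t>(memory_size),
//       RelocInfo::WASM_MEMORY_SIZE_REFERENCE);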
 
 Schedule* RawMachineAssembler::Export() {
   // Compute the correct codegen order.
@@ -44,7 +50,7 @@
     PrintF("--- RAW SCHEDULE -------------------------------------------\n");
     os << *schedule_;
   }
-  schedule_->EnsureSplitEdgeForm();
+  schedule_->EnsureCFGWellFormedness();
   schedule_->PropagateDeferredMark();
   if (FLAG_trace_turbo_scheduler) {
     PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
@@ -109,7 +115,6 @@
 
 void RawMachineAssembler::Return(Node* value) {
   Node* ret = MakeNode(common()->Return(), 1, &value);
-  NodeProperties::MergeControlToEnd(graph(), common(), ret);
   schedule()->AddReturn(CurrentBlock(), ret);
   current_block_ = nullptr;
 }
@@ -118,7 +123,6 @@
 void RawMachineAssembler::Return(Node* v1, Node* v2) {
   Node* values[] = {v1, v2};
   Node* ret = MakeNode(common()->Return(2), 2, values);
-  NodeProperties::MergeControlToEnd(graph(), common(), ret);
   schedule()->AddReturn(CurrentBlock(), ret);
   current_block_ = nullptr;
 }
@@ -127,7 +131,6 @@
 void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
   Node* values[] = {v1, v2, v3};
   Node* ret = MakeNode(common()->Return(3), 3, values);
-  NodeProperties::MergeControlToEnd(graph(), common(), ret);
   schedule()->AddReturn(CurrentBlock(), ret);
   current_block_ = nullptr;
 }
@@ -254,7 +257,6 @@
     buffer[index++] = args[i];
   }
   Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
-  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
@@ -276,7 +278,6 @@
   Node* nodes[] = {centry, ref, arity, context};
   Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
 
-  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
@@ -298,7 +299,6 @@
   Node* nodes[] = {centry, arg1, ref, arity, context};
   Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
 
-  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
@@ -322,7 +322,6 @@
   Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
   Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
 
-  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
@@ -345,7 +344,6 @@
   Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
   Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
 
-  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
@@ -368,7 +366,6 @@
   Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
   Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
 
-  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index f3445ac..69ddd50 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -76,6 +76,7 @@
     return kPointerSize == 8 ? Int64Constant(value)
                              : Int32Constant(static_cast<int>(value));
   }
+  Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
   Node* Int32Constant(int32_t value) {
     return AddNode(common()->Int32Constant(value));
   }
@@ -104,6 +105,12 @@
   Node* ExternalConstant(ExternalReference address) {
     return AddNode(common()->ExternalConstant(address));
   }
+  Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
+    return AddNode(common()->RelocatableInt32Constant(value, rmode));
+  }
+  Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
+    return AddNode(common()->RelocatableInt64Constant(value, rmode));
+  }
 
   Node* Projection(int index, Node* a) {
     return AddNode(common()->Projection(index), a);
@@ -126,6 +133,15 @@
                    base, index, value);
   }
 
+  // Atomic memory operations.
+  Node* AtomicLoad(MachineType rep, Node* base, Node* index) {
+    return AddNode(machine()->AtomicLoad(rep), base, index);
+  }
+  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+                    Node* value) {
+    return AddNode(machine()->AtomicStore(rep), base, index, value);
+  }
+
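  // Usage sketch (hypothetical test body; PointerConstant/IntPtrConstant are
  // the existing constant helpers of this class):
  //
  //   m.AtomicStore(MachineRepresentation::kWord32,
  //                 m.PointerConstant(buffer), m.IntPtrConstant(0),
  //                 m.Int32Constant(42));
  //   Node* loaded = m.AtomicLoad(MachineType::Int32(),
  //                               m.PointerConstant(buffer),
  //                               m.IntPtrConstant(0));
  //   m.Return(loaded);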
   // Arithmetic Operations.
   Node* WordAnd(Node* a, Node* b) {
     return AddNode(machine()->WordAnd(), a, b);
@@ -353,6 +369,8 @@
   INTPTR_BINOP(Int, AddWithOverflow);
   INTPTR_BINOP(Int, Sub);
   INTPTR_BINOP(Int, SubWithOverflow);
+  INTPTR_BINOP(Int, Mul);
+  INTPTR_BINOP(Int, Div);
   INTPTR_BINOP(Int, LessThan);
   INTPTR_BINOP(Int, LessThanOrEqual);
   INTPTR_BINOP(Word, Equal);
@@ -381,6 +399,9 @@
   Node* Float32Sub(Node* a, Node* b) {
     return AddNode(machine()->Float32Sub(), a, b);
   }
+  Node* Float32SubPreserveNan(Node* a, Node* b) {
+    return AddNode(machine()->Float32SubPreserveNan(), a, b);
+  }
   Node* Float32Mul(Node* a, Node* b) {
     return AddNode(machine()->Float32Mul(), a, b);
   }
@@ -419,6 +440,9 @@
   Node* Float64Sub(Node* a, Node* b) {
     return AddNode(machine()->Float64Sub(), a, b);
   }
+  Node* Float64SubPreserveNan(Node* a, Node* b) {
+    return AddNode(machine()->Float64SubPreserveNan(), a, b);
+  }
   Node* Float64Mul(Node* a, Node* b) {
     return AddNode(machine()->Float64Mul(), a, b);
   }
@@ -455,6 +479,12 @@
   }
 
   // Conversions.
+  Node* BitcastWordToTagged(Node* a) {
+    return AddNode(machine()->BitcastWordToTagged(), a);
+  }
+  Node* TruncateFloat64ToWord32(Node* a) {
+    return AddNode(machine()->TruncateFloat64ToWord32(), a);
+  }
   Node* ChangeFloat32ToFloat64(Node* a) {
     return AddNode(machine()->ChangeFloat32ToFloat64(), a);
   }
@@ -500,12 +530,12 @@
   Node* TruncateFloat64ToFloat32(Node* a) {
     return AddNode(machine()->TruncateFloat64ToFloat32(), a);
   }
-  Node* TruncateFloat64ToInt32(TruncationMode mode, Node* a) {
-    return AddNode(machine()->TruncateFloat64ToInt32(mode), a);
-  }
   Node* TruncateInt64ToInt32(Node* a) {
     return AddNode(machine()->TruncateInt64ToInt32(), a);
   }
+  Node* RoundFloat64ToInt32(Node* a) {
+    return AddNode(machine()->RoundFloat64ToInt32(), a);
+  }
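  // The old TruncateFloat64ToInt32(TruncationMode, ...) operator is split in
  // two: TruncateFloat64ToWord32 keeps the JavaScript ToInt32 behaviour
  // (truncate, then wrap modulo 2^32; NaN/Infinity -> 0), while
  // RoundFloat64ToInt32 is the plain round-toward-zero conversion. For
  // example (semantics sketch):
  //
  //   TruncateFloat64ToWord32(Float64Constant(4294967298.5))  // -> 2
  //   RoundFloat64ToInt32(Float64Constant(-1.9))              // -> -1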
   Node* RoundInt32ToFloat32(Node* a) {
     return AddNode(machine()->RoundInt32ToFloat32(), a);
   }
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index f2160f5..6746719 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -44,39 +44,15 @@
 
 }  // namespace
 
-
-void RegisterAllocatorVerifier::VerifyInput(
-    const OperandConstraint& constraint) {
-  CHECK_NE(kSameAsFirst, constraint.type_);
-  if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
-    CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
-             constraint.virtual_register_);
-  }
-}
-
-
-void RegisterAllocatorVerifier::VerifyTemp(
-    const OperandConstraint& constraint) {
-  CHECK_NE(kSameAsFirst, constraint.type_);
-  CHECK_NE(kImmediate, constraint.type_);
-  CHECK_NE(kExplicit, constraint.type_);
-  CHECK_NE(kConstant, constraint.type_);
-}
-
-
-void RegisterAllocatorVerifier::VerifyOutput(
-    const OperandConstraint& constraint) {
-  CHECK_NE(kImmediate, constraint.type_);
-  CHECK_NE(kExplicit, constraint.type_);
-  CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
-           constraint.virtual_register_);
-}
-
-
 RegisterAllocatorVerifier::RegisterAllocatorVerifier(
     Zone* zone, const RegisterConfiguration* config,
     const InstructionSequence* sequence)
-    : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
+    : zone_(zone),
+      config_(config),
+      sequence_(sequence),
+      constraints_(zone),
+      assessments_(zone),
+      outstanding_assessments_(zone) {
   constraints_.reserve(sequence->instructions().size());
   // TODO(dcarney): model unique constraints.
   // Construct OperandConstraints for all InstructionOperands, eliminating
@@ -111,6 +87,30 @@
   }
 }
 
+void RegisterAllocatorVerifier::VerifyInput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
+    CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+             constraint.virtual_register_);
+  }
+}
+
+void RegisterAllocatorVerifier::VerifyTemp(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(kExplicit, constraint.type_);
+  CHECK_NE(kConstant, constraint.type_);
+}
+
+void RegisterAllocatorVerifier::VerifyOutput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(kExplicit, constraint.type_);
+  CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+           constraint.virtual_register_);
+}
 
 void RegisterAllocatorVerifier::VerifyAssignment() {
   CHECK(sequence()->instructions().size() == constraints()->size());
@@ -138,7 +138,6 @@
   }
 }
 
-
 void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
                                                 OperandConstraint* constraint) {
   constraint->value_ = kMinInt;
@@ -204,7 +203,6 @@
   }
 }
 
-
 void RegisterAllocatorVerifier::CheckConstraint(
     const InstructionOperand* op, const OperandConstraint* constraint) {
   switch (constraint->type_) {
@@ -226,7 +224,7 @@
       CHECK(op->IsRegister());
       return;
     case kDoubleRegister:
-      CHECK(op->IsDoubleRegister());
+      CHECK(op->IsFPRegister());
       return;
     case kExplicit:
       CHECK(op->IsExplicit());
@@ -238,7 +236,7 @@
                constraint->value_);
       return;
     case kFixedDoubleRegister:
-      CHECK(op->IsDoubleRegister());
+      CHECK(op->IsFPRegister());
       CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
                constraint->value_);
       return;
@@ -250,13 +248,13 @@
       CHECK(op->IsStackSlot());
       return;
     case kDoubleSlot:
-      CHECK(op->IsDoubleStackSlot());
+      CHECK(op->IsFPStackSlot());
       return;
     case kNone:
       CHECK(op->IsRegister() || op->IsStackSlot());
       return;
     case kNoneDouble:
-      CHECK(op->IsDoubleRegister() || op->IsDoubleStackSlot());
+      CHECK(op->IsFPRegister() || op->IsFPStackSlot());
       return;
     case kSameAsFirst:
       CHECK(false);
@@ -264,457 +262,235 @@
   }
 }
 
-namespace {
-
-typedef RpoNumber Rpo;
-
-static const int kInvalidVreg = InstructionOperand::kInvalidVirtualRegister;
-
-struct PhiData : public ZoneObject {
-  PhiData(Rpo definition_rpo, const PhiInstruction* phi, int first_pred_vreg,
-          const PhiData* first_pred_phi, Zone* zone)
-      : definition_rpo(definition_rpo),
-        virtual_register(phi->virtual_register()),
-        first_pred_vreg(first_pred_vreg),
-        first_pred_phi(first_pred_phi),
-        operands(zone) {
-    operands.reserve(phi->operands().size());
-    operands.insert(operands.begin(), phi->operands().begin(),
-                    phi->operands().end());
-  }
-  const Rpo definition_rpo;
-  const int virtual_register;
-  const int first_pred_vreg;
-  const PhiData* first_pred_phi;
-  IntVector operands;
-};
-
-class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
- public:
-  explicit PhiMap(Zone* zone) : ZoneMap<int, PhiData*>(zone) {}
-};
-
-struct OperandLess {
-  bool operator()(const InstructionOperand* a,
-                  const InstructionOperand* b) const {
-    return a->CompareCanonicalized(*b);
-  }
-};
-
-class OperandMap : public ZoneObject {
- public:
-  struct MapValue : public ZoneObject {
-    MapValue()
-        : incoming(nullptr),
-          define_vreg(kInvalidVreg),
-          use_vreg(kInvalidVreg),
-          succ_vreg(kInvalidVreg) {}
-    MapValue* incoming;  // value from first predecessor block.
-    int define_vreg;     // valid if this value was defined in this block.
-    int use_vreg;        // valid if this value was used in this block.
-    int succ_vreg;       // valid if propagated back from successor block.
-  };
-
-  class Map
-      : public ZoneMap<const InstructionOperand*, MapValue*, OperandLess> {
-   public:
-    explicit Map(Zone* zone)
-        : ZoneMap<const InstructionOperand*, MapValue*, OperandLess>(zone) {}
-
-    // Remove all entries with keys not in other.
-    void Intersect(const Map& other) {
-      if (this->empty()) return;
-      auto it = this->begin();
-      OperandLess less;
-      for (const std::pair<const InstructionOperand*, MapValue*>& o : other) {
-        while (less(it->first, o.first)) {
-          this->erase(it++);
-          if (it == this->end()) return;
-        }
-        if (it->first->EqualsCanonicalized(*o.first)) {
-          ++it;
-          if (it == this->end()) return;
-        } else {
-          CHECK(less(o.first, it->first));
-        }
-      }
-    }
-  };
-
-  explicit OperandMap(Zone* zone) : map_(zone) {}
-
-  Map& map() { return map_; }
-
-  void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
-    // Compute outgoing mappings.
-    Map to_insert(zone);
-    for (const MoveOperands* move : *moves) {
-      if (move->IsEliminated()) continue;
-      auto cur = map().find(&move->source());
-      CHECK(cur != map().end());
-      auto res =
-          to_insert.insert(std::make_pair(&move->destination(), cur->second));
-      // Ensure injectivity of moves.
-      CHECK(res.second);
-    }
-    // Drop current mappings.
-    for (const MoveOperands* move : *moves) {
-      if (move->IsEliminated()) continue;
-      auto cur = map().find(&move->destination());
-      if (cur != map().end()) map().erase(cur);
-    }
-    // Insert new values.
-    map().insert(to_insert.begin(), to_insert.end());
-  }
-
-  void RunGaps(Zone* zone, const Instruction* instr) {
-    for (int i = Instruction::FIRST_GAP_POSITION;
-         i <= Instruction::LAST_GAP_POSITION; i++) {
-      Instruction::GapPosition inner_pos =
-          static_cast<Instruction::GapPosition>(i);
-      const ParallelMove* move = instr->GetParallelMove(inner_pos);
-      if (move == nullptr) continue;
-      RunParallelMoves(zone, move);
-    }
-  }
-
-  void Drop(const InstructionOperand* op) {
-    auto it = map().find(op);
-    if (it != map().end()) map().erase(it);
-  }
-
-  void DropRegisters(const RegisterConfiguration* config) {
-    // TODO(dcarney): sort map by kind and drop range.
-    for (auto it = map().begin(); it != map().end();) {
-      const InstructionOperand* op = it->first;
-      if (op->IsRegister() || op->IsDoubleRegister()) {
-        map().erase(it++);
-      } else {
-        ++it;
-      }
-    }
-  }
-
-  MapValue* Define(Zone* zone, const InstructionOperand* op,
-                   int virtual_register) {
-    MapValue* value = new (zone) MapValue();
-    value->define_vreg = virtual_register;
-    auto res = map().insert(std::make_pair(op, value));
-    if (!res.second) res.first->second = value;
-    return value;
-  }
-
-  void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
-    auto it = map().find(op);
-    CHECK(it != map().end());
-    MapValue* v = it->second;
-    if (v->define_vreg != kInvalidVreg) {
-      CHECK_EQ(v->define_vreg, use_vreg);
-    }
-    // Already used this vreg in this block.
-    if (v->use_vreg != kInvalidVreg) {
-      CHECK_EQ(v->use_vreg, use_vreg);
-      return;
-    }
-    if (!initial_pass) {
-      // A value may be defined and used in this block or the use must have
-      // propagated up.
-      if (v->succ_vreg != kInvalidVreg) {
-        CHECK_EQ(v->succ_vreg, use_vreg);
-      } else {
-        CHECK_EQ(v->define_vreg, use_vreg);
-      }
-      // Mark the use.
-      it->second->use_vreg = use_vreg;
-      return;
-    }
-    // Go up block list and ensure the correct definition is reached.
-    for (; v != nullptr; v = v->incoming) {
-      // Value unused in block.
-      if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
-        continue;
-      }
-      // Found correct definition or use.
-      CHECK(v->define_vreg == use_vreg || v->use_vreg == use_vreg);
-      // Mark the use.
-      it->second->use_vreg = use_vreg;
-      return;
-    }
-    // Use of a non-phi value without definition.
-    CHECK(false);
-  }
-
-  void UsePhi(const InstructionOperand* op, const PhiData* phi,
-              bool initial_pass) {
-    auto it = map().find(op);
-    CHECK(it != map().end());
-    MapValue* v = it->second;
-    int use_vreg = phi->virtual_register;
-    // Phis are not defined.
-    CHECK_EQ(kInvalidVreg, v->define_vreg);
-    // Already used this vreg in this block.
-    if (v->use_vreg != kInvalidVreg) {
-      CHECK_EQ(v->use_vreg, use_vreg);
-      return;
-    }
-    if (!initial_pass) {
-      // A used phi must have propagated its use to a predecessor.
-      CHECK_EQ(v->succ_vreg, use_vreg);
-      // Mark the use.
-      v->use_vreg = use_vreg;
-      return;
-    }
-    // Go up the block list starting at the first predecessor and ensure this
-    // phi has a correct use or definition.
-    for (v = v->incoming; v != nullptr; v = v->incoming) {
-      // Value unused in block.
-      if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
-        continue;
-      }
-      // Found correct definition or use.
-      if (v->define_vreg != kInvalidVreg) {
-        CHECK(v->define_vreg == phi->first_pred_vreg);
-      } else if (v->use_vreg != phi->first_pred_vreg) {
-        // Walk the phi chain, hunting for a matching phi use.
-        const PhiData* p = phi;
-        for (; p != nullptr; p = p->first_pred_phi) {
-          if (p->virtual_register == v->use_vreg) break;
-        }
-        CHECK(p);
-      }
-      // Mark the use.
-      it->second->use_vreg = use_vreg;
-      return;
-    }
-    // Use of a phi value without definition.
-    UNREACHABLE();
-  }
-
- private:
-  Map map_;
-  DISALLOW_COPY_AND_ASSIGN(OperandMap);
-};
-
-}  // namespace
-
-
-class RegisterAllocatorVerifier::BlockMaps {
- public:
-  BlockMaps(Zone* zone, const InstructionSequence* sequence)
-      : zone_(zone),
-        sequence_(sequence),
-        phi_map_guard_(sequence->VirtualRegisterCount(), zone),
-        phi_map_(zone),
-        incoming_maps_(zone),
-        outgoing_maps_(zone) {
-    InitializePhis();
-    InitializeOperandMaps();
-  }
-
-  bool IsPhi(int virtual_register) {
-    return phi_map_guard_.Contains(virtual_register);
-  }
-
-  const PhiData* GetPhi(int virtual_register) {
-    auto it = phi_map_.find(virtual_register);
-    CHECK(it != phi_map_.end());
-    return it->second;
-  }
-
-  OperandMap* InitializeIncoming(size_t block_index, bool initial_pass) {
-    return initial_pass ? InitializeFromFirstPredecessor(block_index)
-                        : InitializeFromIntersection(block_index);
-  }
-
-  void PropagateUsesBackwards() {
-    typedef std::set<size_t, std::greater<size_t>, zone_allocator<size_t>>
-        BlockIds;
-    BlockIds block_ids((BlockIds::key_compare()),
-                       zone_allocator<size_t>(zone()));
-    // First ensure that incoming contains only keys in all predecessors.
-    for (const InstructionBlock* block : sequence()->instruction_blocks()) {
-      size_t index = block->rpo_number().ToSize();
-      block_ids.insert(index);
-      OperandMap::Map& succ_map = incoming_maps_[index]->map();
-      for (size_t i = 0; i < block->PredecessorCount(); ++i) {
-        RpoNumber pred_rpo = block->predecessors()[i];
-        succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
-      }
-    }
-    // Back propagation fixpoint.
-    while (!block_ids.empty()) {
-      // Pop highest block_id.
-      auto block_id_it = block_ids.begin();
-      const size_t succ_index = *block_id_it;
-      block_ids.erase(block_id_it);
-      // Propagate uses back to their definition blocks using succ_vreg.
-      const InstructionBlock* block =
-          sequence()->instruction_blocks()[succ_index];
-      OperandMap::Map& succ_map = incoming_maps_[succ_index]->map();
-      for (size_t i = 0; i < block->PredecessorCount(); ++i) {
-        for (auto& succ_val : succ_map) {
-          // An incoming map contains no defines.
-          CHECK_EQ(kInvalidVreg, succ_val.second->define_vreg);
-          // Compute succ_vreg.
-          int succ_vreg = succ_val.second->succ_vreg;
-          if (succ_vreg == kInvalidVreg) {
-            succ_vreg = succ_val.second->use_vreg;
-            // Initialize succ_vreg in back propagation chain.
-            succ_val.second->succ_vreg = succ_vreg;
-          }
-          if (succ_vreg == kInvalidVreg) continue;
-          // May need to transition phi.
-          if (IsPhi(succ_vreg)) {
-            const PhiData* phi = GetPhi(succ_vreg);
-            if (phi->definition_rpo.ToSize() == succ_index) {
-              // phi definition block, transition to pred value.
-              succ_vreg = phi->operands[i];
-            }
-          }
-          // Push succ_vreg up to all predecessors.
-          RpoNumber pred_rpo = block->predecessors()[i];
-          OperandMap::Map& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
-          auto& pred_val = *pred_map.find(succ_val.first);
-          if (pred_val.second->use_vreg != kInvalidVreg) {
-            CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
-          }
-          if (pred_val.second->define_vreg != kInvalidVreg) {
-            CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
-          }
-          if (pred_val.second->succ_vreg != kInvalidVreg) {
-            if (succ_vreg != pred_val.second->succ_vreg) {
-              // When a block introduces 2 identical phis A and B, and both are
-              // operands to other phis C and D, and we optimized the moves
-              // defining A or B such that they now appear in the block defining
-              // A and B, the back propagation will get confused when visiting
-              // upwards from C and D. The operand in the block defining A and B
-              // will be attributed to C (or D, depending which of these is
-              // visited first).
-              CHECK(IsPhi(pred_val.second->succ_vreg));
-              CHECK(IsPhi(succ_vreg));
-              const PhiData* current_phi = GetPhi(succ_vreg);
-              const PhiData* assigned_phi = GetPhi(pred_val.second->succ_vreg);
-              CHECK_EQ(current_phi->operands.size(),
-                       assigned_phi->operands.size());
-              CHECK_EQ(current_phi->definition_rpo,
-                       assigned_phi->definition_rpo);
-              for (size_t i = 0; i < current_phi->operands.size(); ++i) {
-                CHECK_EQ(current_phi->operands[i], assigned_phi->operands[i]);
-              }
-            }
-          } else {
-            pred_val.second->succ_vreg = succ_vreg;
-            block_ids.insert(pred_rpo.ToSize());
-          }
-        }
-      }
-    }
-    // Clear uses and back links for second pass.
-    for (OperandMap* operand_map : incoming_maps_) {
-      for (auto& succ_val : operand_map->map()) {
-        succ_val.second->incoming = nullptr;
-        succ_val.second->use_vreg = kInvalidVreg;
-      }
-    }
-  }
-
- private:
-  OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
-    OperandMap* to_init = outgoing_maps_[block_index];
-    CHECK(to_init->map().empty());
-    const InstructionBlock* block =
-        sequence()->instruction_blocks()[block_index];
-    if (block->predecessors().empty()) return to_init;
-    size_t predecessor_index = block->predecessors()[0].ToSize();
-    // Ensure not a backedge.
-    CHECK(predecessor_index < block->rpo_number().ToSize());
-    OperandMap* incoming = outgoing_maps_[predecessor_index];
-    // Copy map and replace values.
-    to_init->map() = incoming->map();
-    for (auto& it : to_init->map()) {
-      OperandMap::MapValue* incoming = it.second;
-      it.second = new (zone()) OperandMap::MapValue();
-      it.second->incoming = incoming;
-    }
-    // Copy to incoming map for second pass.
-    incoming_maps_[block_index]->map() = to_init->map();
-    return to_init;
-  }
-
-  OperandMap* InitializeFromIntersection(size_t block_index) {
-    return incoming_maps_[block_index];
-  }
-
-  void InitializeOperandMaps() {
-    size_t block_count = sequence()->instruction_blocks().size();
-    incoming_maps_.reserve(block_count);
-    outgoing_maps_.reserve(block_count);
-    for (size_t i = 0; i < block_count; ++i) {
-      incoming_maps_.push_back(new (zone()) OperandMap(zone()));
-      outgoing_maps_.push_back(new (zone()) OperandMap(zone()));
-    }
-  }
-
-  void InitializePhis() {
-    const size_t block_count = sequence()->instruction_blocks().size();
-    for (size_t block_index = 0; block_index < block_count; ++block_index) {
-      const InstructionBlock* block =
-          sequence()->instruction_blocks()[block_index];
-      for (const PhiInstruction* phi : block->phis()) {
-        int first_pred_vreg = phi->operands()[0];
-        const PhiData* first_pred_phi = nullptr;
-        if (IsPhi(first_pred_vreg)) {
-          first_pred_phi = GetPhi(first_pred_vreg);
-          first_pred_vreg = first_pred_phi->first_pred_vreg;
-        }
-        CHECK(!IsPhi(first_pred_vreg));
-        PhiData* phi_data = new (zone()) PhiData(
-            block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
-        auto res =
-            phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
-        CHECK(res.second);
-        phi_map_guard_.Add(phi->virtual_register());
-      }
-    }
-  }
-
-  typedef ZoneVector<OperandMap*> OperandMaps;
-  typedef ZoneVector<PhiData*> PhiVector;
-
-  Zone* zone() const { return zone_; }
-  const InstructionSequence* sequence() const { return sequence_; }
-
-  Zone* const zone_;
-  const InstructionSequence* const sequence_;
-  BitVector phi_map_guard_;
-  PhiMap phi_map_;
-  OperandMaps incoming_maps_;
-  OperandMaps outgoing_maps_;
-};
-
-
-void RegisterAllocatorVerifier::VerifyGapMoves() {
-  BlockMaps block_maps(zone(), sequence());
-  VerifyGapMoves(&block_maps, true);
-  block_maps.PropagateUsesBackwards();
-  VerifyGapMoves(&block_maps, false);
+void BlockAssessments::PerformMoves(const Instruction* instruction) {
+  const ParallelMove* first =
+      instruction->GetParallelMove(Instruction::GapPosition::START);
+  PerformParallelMoves(first);
+  const ParallelMove* last =
+      instruction->GetParallelMove(Instruction::GapPosition::END);
+  PerformParallelMoves(last);
 }
 
+void BlockAssessments::PerformParallelMoves(const ParallelMove* moves) {
+  if (moves == nullptr) return;
 
-// Compute and verify outgoing values for every block.
-void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
-                                               bool initial_pass) {
+  CHECK(map_for_moves_.empty());
+  for (MoveOperands* move : *moves) {
+    if (move->IsEliminated() || move->IsRedundant()) continue;
+    auto it = map_.find(move->source());
+    // The RHS of a parallel move should already have been assessed.
+    CHECK(it != map_.end());
+    // The LHS of a parallel move should not have been assigned in this
+    // parallel move.
+    CHECK(map_for_moves_.find(move->destination()) == map_for_moves_.end());
+    // Copy the assessment to the destination.
+    map_for_moves_[move->destination()] = it->second;
+  }
+  for (auto pair : map_for_moves_) {
+    map_[pair.first] = pair.second;
+  }
+  map_for_moves_.clear();
+}
+
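A minimal standalone sketch of the staging discipline above, with std::map standing in for the zone-allocated OperandMap and toy string operands (all names here are illustrative, not V8 API):

    #include <cassert>
    #include <map>
    #include <string>

    // operand -> virtual register, as a toy stand-in for BlockAssessments.
    using AssessmentMap = std::map<std::string, int>;

    // Sources are read in the pre-move state; destinations are staged in a
    // side map (the role of map_for_moves_) and committed only afterwards.
    void PerformParallelMoves(AssessmentMap& map,
                              const std::map<std::string, std::string>& moves) {
      AssessmentMap staged;
      for (const auto& move : moves) {  // move.first = dest, move.second = src
        auto it = map.find(move.second);
        assert(it != map.end());                // RHS already assessed
        assert(staged.count(move.first) == 0);  // LHS not assigned twice
        staged[move.first] = it->second;
      }
      for (const auto& pair : staged) map[pair.first] = pair.second;
    }

    int main() {
      AssessmentMap m = {{"r1", 7}, {"r2", 9}};
      // A swap-like parallel move (r1 <- r2, r2 <- r1) only works because
      // both sources are resolved before either destination is written.
      PerformParallelMoves(m, {{"r1", "r2"}, {"r2", "r1"}});
      assert(m["r1"] == 9 && m["r2"] == 7);
    }
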
+void BlockAssessments::DropRegisters() {
+  for (auto iterator = map().begin(), end = map().end(); iterator != end;) {
+    auto current = iterator;
+    ++iterator;
+    InstructionOperand op = current->first;
+    if (op.IsAnyRegister()) map().erase(current);
+  }
+}
+
+BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
+    const InstructionBlock* block) {
+  RpoNumber current_block_id = block->rpo_number();
+
+  BlockAssessments* ret = new (zone()) BlockAssessments(zone());
+  if (block->PredecessorCount() == 0) {
+    // TODO(mtrofin): the following check should hold; however, in certain
+    // unit tests it is invalidated by the last block. Investigate and
+    // normalize the CFG.
+    // CHECK(current_block_id.ToInt() == 0);
+    // The phi size test below is needed because we can, technically, have
+    // phi instructions with a single argument. Some tests expose that, too.
+  } else if (block->PredecessorCount() == 1 && block->phis().size() == 0) {
+    const BlockAssessments* prev_block = assessments_[block->predecessors()[0]];
+    ret->CopyFrom(prev_block);
+  } else {
+    for (RpoNumber pred_id : block->predecessors()) {
+      // For every operand coming from any of the predecessors, create an
+      // Unfinalized assessment.
+      auto iterator = assessments_.find(pred_id);
+      if (iterator == assessments_.end()) {
+        // This block is the head of a loop, and this predecessor is the
+        // loopback arc. Validate that this is a loop case; otherwise the
+        // CFG is malformed.
+        CHECK(pred_id >= current_block_id);
+        CHECK(block->IsLoopHeader());
+        continue;
+      }
+      const BlockAssessments* pred_assessments = iterator->second;
+      CHECK_NOT_NULL(pred_assessments);
+      for (auto pair : pred_assessments->map()) {
+        InstructionOperand operand = pair.first;
+        if (ret->map().find(operand) == ret->map().end()) {
+          ret->map().insert(std::make_pair(
+              operand, new (zone()) PendingAssessment(block, operand)));
+        }
+      }
+    }
+  }
+  return ret;
+}
+
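A condensed sketch of the seeding rules in CreateForBlock, under the assumption of a toy operand map (std::map with string operands; a null entry models a not-yet-visited loop-back predecessor):

    #include <map>
    #include <string>
    #include <vector>

    enum Kind { Final, Pending };
    using AssessmentMap = std::map<std::string, Kind>;

    AssessmentMap CreateForBlock(const std::vector<const AssessmentMap*>& preds,
                                 bool has_phis) {
      AssessmentMap result;
      // Single predecessor and no phis: plain copy of the predecessor's state.
      if (preds.size() == 1 && !has_phis) return *preds[0];
      for (const AssessmentMap* pred : preds) {
        if (pred == nullptr) continue;  // loop-back arc: skip, resolve later
        for (const auto& pair : *pred) {
          // Every operand seen in any visited predecessor becomes Pending;
          // emplace keeps the first insertion, matching the code above.
          result.emplace(pair.first, Pending);
        }
      }
      return result;
    }
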
+void RegisterAllocatorVerifier::ValidatePendingAssessment(
+    RpoNumber block_id, InstructionOperand op,
+    BlockAssessments* current_assessments, const PendingAssessment* assessment,
+    int virtual_register) {
+  // When validating a pending assessment, some of the assessments for the
+  // original operand (the one the assessment was first created for) may
+  // themselves be pending. To avoid recursion, we use a work list; to deal
+  // with cycles, we keep a set of seen nodes.
+  ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
+  ZoneSet<RpoNumber> seen(zone());
+  worklist.push(std::make_pair(assessment, virtual_register));
+  seen.insert(block_id);
+
+  while (!worklist.empty()) {
+    auto work = worklist.front();
+    const PendingAssessment* current_assessment = work.first;
+    int current_virtual_register = work.second;
+    InstructionOperand current_operand = current_assessment->operand();
+    worklist.pop();
+
+    const InstructionBlock* origin = current_assessment->origin();
+    CHECK(origin->PredecessorCount() > 1 || origin->phis().size() > 0);
+
+    // Check if the virtual register is a phi first, instead of relying on
+    // the incoming assessments. In particular, this handles the case
+    // v1 = phi v0 v0, which is structurally identical to v0 having been
+    // defined at the top of a diamond, and arriving at the node joining the
+    // diamond's branches.
+    const PhiInstruction* phi = nullptr;
+    for (const PhiInstruction* candidate : origin->phis()) {
+      if (candidate->virtual_register() == current_virtual_register) {
+        phi = candidate;
+        break;
+      }
+    }
+
+    int op_index = 0;
+    for (RpoNumber pred : origin->predecessors()) {
+      int expected =
+          phi != nullptr ? phi->operands()[op_index] : current_virtual_register;
+
+      ++op_index;
+      auto pred_assignment = assessments_.find(pred);
+      if (pred_assignment == assessments_.end()) {
+        CHECK(origin->IsLoopHeader());
+        auto todo_iter = outstanding_assessments_.find(pred);
+        DelayedAssessments* set = nullptr;
+        if (todo_iter == outstanding_assessments_.end()) {
+          set = new (zone()) DelayedAssessments(zone());
+          outstanding_assessments_.insert(std::make_pair(pred, set));
+        } else {
+          set = todo_iter->second;
+        }
+        set->AddDelayedAssessment(current_operand, expected);
+        continue;
+      }
+
+      const BlockAssessments* pred_assessments = pred_assignment->second;
+      auto found_contribution = pred_assessments->map().find(current_operand);
+      CHECK(found_contribution != pred_assessments->map().end());
+      Assessment* contribution = found_contribution->second;
+
+      switch (contribution->kind()) {
+        case Final:
+          ValidateFinalAssessment(
+              block_id, current_operand, current_assessments,
+              FinalAssessment::cast(contribution), expected);
+          break;
+        case Pending: {
+          // This happens when a diamond feeds into another one, and the
+          // inner one is never used, other than for carrying the value.
+          const PendingAssessment* next = PendingAssessment::cast(contribution);
+          if (seen.find(pred) == seen.end()) {
+            worklist.push({next, expected});
+            seen.insert(pred);
+          }
+          // Note that we do not want to finalize pending assessments at the
+          // beginning of a block - which is the information we'd have
+          // available here - because this operand may be reused to define
+          // duplicate phis.
+          break;
+        }
+      }
+    }
+  }
+  // If everything checks out, we can finalize the assessment.
+  current_assessments->map()[op] =
+      new (zone()) FinalAssessment(virtual_register, assessment);
+}
+
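The recursion-free traversal above is the classic worklist-plus-seen-set pattern; a generic sketch (the Node type is an illustrative stand-in for chained PendingAssessments, not a V8 type):

    #include <queue>
    #include <set>
    #include <utility>
    #include <vector>

    struct Node {
      int id;
      std::vector<const Node*> preds;  // stands in for predecessor assessments
    };

    void ValidateFrom(const Node* start, int expected_vreg) {
      std::queue<std::pair<const Node*, int>> worklist;
      std::set<int> seen;
      worklist.push(std::make_pair(start, expected_vreg));
      seen.insert(start->id);
      while (!worklist.empty()) {
        std::pair<const Node*, int> work = worklist.front();
        worklist.pop();
        for (const Node* pred : work.first->preds) {
          // ...check pred against work.second here; enqueuing only unseen
          // nodes guarantees termination on diamonds and loop back-edges.
          if (seen.insert(pred->id).second) {
            worklist.push(std::make_pair(pred, work.second));
          }
        }
      }
    }
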
+void RegisterAllocatorVerifier::ValidateFinalAssessment(
+    RpoNumber block_id, InstructionOperand op,
+    BlockAssessments* current_assessments, const FinalAssessment* assessment,
+    int virtual_register) {
+  if (assessment->virtual_register() == virtual_register) return;
+  // If we have two phis with the exact same operand list, the first phi is
+  // used before the second one (via the operand incoming to the block), and
+  // the second one's operand is defined (via a parallel move) after that
+  // use, then the original operand will have been assigned to the first phi.
+  // We then consult the original pending assessment to ascertain whether op
+  // is virtual_register.
+  const PendingAssessment* old = assessment->original_pending_assessment();
+  CHECK_NOT_NULL(old);
+  ValidatePendingAssessment(block_id, op, current_assessments, old,
+                            virtual_register);
+}
+
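A concrete instance of the scenario this guards against, sketched with illustrative names:

    // Block B3 defines two phis with identical operand lists:
    //   v1 = phi(v0, v0)   and   v2 = phi(v0, v0)
    // Both arrive through operand r1 at B3's entry. If r1 is first used as
    // v1, the pending assessment is finalized to Final(v1). If a later
    // parallel move re-purposes r1 for v2 and r1 is then used as v2, the
    // lookup meets Final(v1) with v1 != v2; rather than failing, we re-run
    // validation against the original pending assessment, which checks v2
    // against the same predecessors.
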
+void RegisterAllocatorVerifier::ValidateUse(
+    RpoNumber block_id, BlockAssessments* current_assessments,
+    InstructionOperand op, int virtual_register) {
+  auto iterator = current_assessments->map().find(op);
+  // We should have seen this operand before.
+  CHECK(iterator != current_assessments->map().end());
+  Assessment* assessment = iterator->second;
+
+  switch (assessment->kind()) {
+    case Final:
+      ValidateFinalAssessment(block_id, op, current_assessments,
+                              FinalAssessment::cast(assessment),
+                              virtual_register);
+      break;
+    case Pending: {
+      const PendingAssessment* pending = PendingAssessment::cast(assessment);
+      ValidatePendingAssessment(block_id, op, current_assessments, pending,
+                                virtual_register);
+      break;
+    }
+  }
+}
+
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+  CHECK(assessments_.empty());
+  CHECK(outstanding_assessments_.empty());
   const size_t block_count = sequence()->instruction_blocks().size();
   for (size_t block_index = 0; block_index < block_count; ++block_index) {
-    OperandMap* current =
-        block_maps->InitializeIncoming(block_index, initial_pass);
     const InstructionBlock* block =
         sequence()->instruction_blocks()[block_index];
+    BlockAssessments* block_assessments = CreateForBlock(block);
+
     for (int instr_index = block->code_start(); instr_index < block->code_end();
          ++instr_index) {
       const InstructionConstraint& instr_constraint = constraints_[instr_index];
       const Instruction* instr = instr_constraint.instruction_;
-      current->RunGaps(zone(), instr);
+      block_assessments->PerformMoves(instr);
+
       const OperandConstraint* op_constraints =
           instr_constraint.operand_constraints_;
       size_t count = 0;
@@ -724,24 +500,19 @@
           continue;
         }
         int virtual_register = op_constraints[count].virtual_register_;
-        const InstructionOperand* op = instr->InputAt(i);
-        if (!block_maps->IsPhi(virtual_register)) {
-          current->Use(op, virtual_register, initial_pass);
-        } else {
-          const PhiData* phi = block_maps->GetPhi(virtual_register);
-          current->UsePhi(op, phi, initial_pass);
-        }
+        InstructionOperand op = *instr->InputAt(i);
+        ValidateUse(block->rpo_number(), block_assessments, op,
+                    virtual_register);
       }
       for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
-        current->Drop(instr->TempAt(i));
+        block_assessments->Drop(*instr->TempAt(i));
       }
       if (instr->IsCall()) {
-        current->DropRegisters(config());
+        block_assessments->DropRegisters();
       }
       for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
         int virtual_register = op_constraints[count].virtual_register_;
-        OperandMap::MapValue* value =
-            current->Define(zone(), instr->OutputAt(i), virtual_register);
+        block_assessments->AddDefinition(*instr->OutputAt(i), virtual_register);
         if (op_constraints[count].type_ == kRegisterAndSlot) {
           const AllocatedOperand* reg_op =
               AllocatedOperand::cast(instr->OutputAt(i));
@@ -749,13 +520,38 @@
           const AllocatedOperand* stack_op = AllocatedOperand::New(
               zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
               op_constraints[i].spilled_slot_);
-          auto insert_result =
-              current->map().insert(std::make_pair(stack_op, value));
-          DCHECK(insert_result.second);
-          USE(insert_result);
+          block_assessments->AddDefinition(*stack_op, virtual_register);
         }
       }
     }
+    // Now commit the assessments for this block. If there are any delayed
+    // assessments, ValidatePendingAssessment should see this block, too.
+    assessments_[block->rpo_number()] = block_assessments;
+
+    auto todo_iter = outstanding_assessments_.find(block->rpo_number());
+    if (todo_iter == outstanding_assessments_.end()) continue;
+    DelayedAssessments* todo = todo_iter->second;
+    for (auto pair : todo->map()) {
+      InstructionOperand op = pair.first;
+      int vreg = pair.second;
+      auto found_op = block_assessments->map().find(op);
+      CHECK(found_op != block_assessments->map().end());
+      switch (found_op->second->kind()) {
+        case Final:
+          ValidateFinalAssessment(block->rpo_number(), op, block_assessments,
+                                  FinalAssessment::cast(found_op->second),
+                                  vreg);
+          break;
+        case Pending:
+          const PendingAssessment* pending =
+              PendingAssessment::cast(found_op->second);
+          ValidatePendingAssessment(block->rpo_number(), op, block_assessments,
+                                    pending, vreg);
+          block_assessments->map()[op] =
+              new (zone()) FinalAssessment(vreg, pending);
+          break;
+      }
+    }
   }
 }
 
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index f3ab54f..06d9029 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -14,6 +14,153 @@
 class InstructionOperand;
 class InstructionSequence;
 
+// The register allocator verifier traverses instructions in the instruction
+// sequence, and verifies the correctness of machine operand substitutions of
+// virtual registers. It collects the virtual register instruction signatures
+// before register allocation. Then, after the register allocation pipeline
+// completes, it compares the operand substitutions against the pre-allocation
+// data.
+// At a high level, validation works as follows: we iterate through each block,
+// and, in a block, through each instruction; then:
+// - when an operand is the output of an instruction, we associate it with the
+// virtual register that the instruction sequence declares as its output. We
+// use the concept of "FinalAssessment" to model this.
+// - when an operand is used in an instruction, we check that the assessment
+// matches the expectation of the instruction.
+// - moves simply copy the assessment over to the new operand.
+// - blocks with more than one predecessor associate a "Pending" assessment
+// with each operand. The pending assessment remembers the operand and the
+// block where it was created. Then, when the value is used (possibly as a
+// different operand, because of moves), we check that the virtual register at
+// the use site matches the definition of this pending operand: either the phi
+// inputs match, or, if it is not a phi, all the predecessors at the point the
+// pending assessment was defined have that operand assigned to the given
+// virtual register.
+// If a block is a loop header - that is, one or more of its predecessors are
+// the block itself or blocks that come after it - we still treat operand uses
+// as above, but we record which operand assessments have not been made yet
+// and which virtual register they must correspond to, and we verify them once
+// the respective predecessor blocks are done.
+// This way, the algorithm always makes a final decision about the operands
+// in an instruction, ensuring convergence.
+// Operand assessments are recorded per block, as the result at the exit from
+// the block. When moving to a new block, we copy the assessments from its
+// single predecessor or, if the block has multiple predecessors, seed them
+// using the mechanism described above.
+
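As a worked example of this scheme (block, operand, and register names below are illustrative):

    // CFG: B0 -> {B1, B2}; {B1, B2} -> B3; B3 contains v1 = phi(v0, v0).
    //
    //   B0: instruction output r1 := v0  // exit map: r1 -> Final(v0)
    //   B1: no moves                     // copies B0's exit map
    //   B2: no moves                     // copies B0's exit map
    //   B3: entry map seeded with r1 -> Pending(origin = B3, operand = r1)
    //
    // At the first use of r1 in B3 (as v1), validation finds the pending
    // assessment, notices that v1 is a phi of B3, and checks that each
    // predecessor's exit map assigns r1 to the corresponding phi input
    // (v0). Both do, so r1 is finalized to Final(v1).
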
+enum AssessmentKind { Final, Pending };
+
+class Assessment : public ZoneObject {
+ public:
+  AssessmentKind kind() const { return kind_; }
+
+ protected:
+  explicit Assessment(AssessmentKind kind) : kind_(kind) {}
+  AssessmentKind kind_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Assessment);
+};
+
+// PendingAssessments are associated with operands coming from the multiple
+// predecessors of a block. We only record the operand and the block, and
+// later determine whether the way the operand is defined (from the
+// predecessors) matches a particular use. This handles scenarios where
+// multiple phis are defined with identical operands, and the move optimizer
+// sank the moves separating the two phis into the block defining them.
+class PendingAssessment final : public Assessment {
+ public:
+  explicit PendingAssessment(const InstructionBlock* origin,
+                             InstructionOperand operand)
+      : Assessment(Pending), origin_(origin), operand_(operand) {}
+
+  static const PendingAssessment* cast(const Assessment* assessment) {
+    CHECK(assessment->kind() == Pending);
+    return static_cast<const PendingAssessment*>(assessment);
+  }
+
+  const InstructionBlock* origin() const { return origin_; }
+  InstructionOperand operand() const { return operand_; }
+
+ private:
+  const InstructionBlock* const origin_;
+  InstructionOperand operand_;
+
+  DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
+};
+
+// FinalAssessments are associated with operands that we know to hold a
+// certain virtual register.
+class FinalAssessment final : public Assessment {
+ public:
+  explicit FinalAssessment(int virtual_register,
+                           const PendingAssessment* original_pending = nullptr)
+      : Assessment(Final),
+        virtual_register_(virtual_register),
+        original_pending_assessment_(original_pending) {}
+
+  int virtual_register() const { return virtual_register_; }
+  static const FinalAssessment* cast(const Assessment* assessment) {
+    CHECK(assessment->kind() == Final);
+    return static_cast<const FinalAssessment*>(assessment);
+  }
+
+  const PendingAssessment* original_pending_assessment() const {
+    return original_pending_assessment_;
+  }
+
+ private:
+  int virtual_register_;
+  const PendingAssessment* original_pending_assessment_;
+
+  DISALLOW_COPY_AND_ASSIGN(FinalAssessment);
+};
+
+struct OperandAsKeyLess {
+  bool operator()(const InstructionOperand& a,
+                  const InstructionOperand& b) const {
+    return a.CompareCanonicalized(b);
+  }
+};
+
+// Assessments associated with a basic block.
+class BlockAssessments : public ZoneObject {
+ public:
+  typedef ZoneMap<InstructionOperand, Assessment*, OperandAsKeyLess> OperandMap;
+  explicit BlockAssessments(Zone* zone)
+      : map_(zone), map_for_moves_(zone), zone_(zone) {}
+  void Drop(InstructionOperand operand) { map_.erase(operand); }
+  void DropRegisters();
+  void AddDefinition(InstructionOperand operand, int virtual_register) {
+    auto existent = map_.find(operand);
+    if (existent != map_.end()) {
+      // Drop the previous assignment.
+      map_.erase(existent);
+    }
+    map_.insert(
+        std::make_pair(operand, new (zone_) FinalAssessment(virtual_register)));
+  }
+
+  void PerformMoves(const Instruction* instruction);
+  void PerformParallelMoves(const ParallelMove* moves);
+  void CopyFrom(const BlockAssessments* other) {
+    CHECK(map_.empty());
+    CHECK_NOT_NULL(other);
+    map_.insert(other->map_.begin(), other->map_.end());
+  }
+
+  OperandMap& map() { return map_; }
+  const OperandMap& map() const { return map_; }
+  void Print() const;
+
+ private:
+  OperandMap map_;
+  OperandMap map_for_moves_;
+  Zone* zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(BlockAssessments);
+};
+
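A hypothetical per-instruction call sequence, mirroring how VerifyGapMoves drives this class (the operands and zone are assumed to come from the surrounding verifier):

    //   BlockAssessments* ba = new (zone) BlockAssessments(zone);
    //   ba->PerformMoves(instr);      // gap moves copy assessments around
    //   /* ...validate inputs via ValidateUse... */
    //   ba->Drop(*instr->TempAt(0));  // temps clobber their operands
    //   if (instr->IsCall()) ba->DropRegisters();
    //   ba->AddDefinition(*instr->OutputAt(0), /*virtual_register=*/42);
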
 class RegisterAllocatorVerifier final : public ZoneObject {
  public:
   RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
@@ -53,10 +200,29 @@
     OperandConstraint* operand_constraints_;
   };
 
-  class BlockMaps;
-
   typedef ZoneVector<InstructionConstraint> Constraints;
 
+  class DelayedAssessments : public ZoneObject {
+   public:
+    explicit DelayedAssessments(Zone* zone) : map_(zone) {}
+
+    const ZoneMap<InstructionOperand, int, OperandAsKeyLess>& map() const {
+      return map_;
+    }
+
+    void AddDelayedAssessment(InstructionOperand op, int vreg) {
+      auto it = map_.find(op);
+      if (it == map_.end()) {
+        map_.insert(std::make_pair(op, vreg));
+      } else {
+        CHECK_EQ(it->second, vreg);
+      }
+    }
+
+   private:
+    ZoneMap<InstructionOperand, int, OperandAsKeyLess> map_;
+  };
+
   Zone* zone() const { return zone_; }
   const RegisterConfiguration* config() { return config_; }
   const InstructionSequence* sequence() const { return sequence_; }
@@ -70,13 +236,25 @@
                        OperandConstraint* constraint);
   void CheckConstraint(const InstructionOperand* op,
                        const OperandConstraint* constraint);
+  BlockAssessments* CreateForBlock(const InstructionBlock* block);
 
-  void VerifyGapMoves(BlockMaps* outgoing_mappings, bool initial_pass);
+  void ValidatePendingAssessment(RpoNumber block_id, InstructionOperand op,
+                                 BlockAssessments* current_assessments,
+                                 const PendingAssessment* assessment,
+                                 int virtual_register);
+  void ValidateFinalAssessment(RpoNumber block_id, InstructionOperand op,
+                               BlockAssessments* current_assessments,
+                               const FinalAssessment* assessment,
+                               int virtual_register);
+  void ValidateUse(RpoNumber block_id, BlockAssessments* current_assessments,
+                   InstructionOperand op, int virtual_register);
 
   Zone* const zone_;
   const RegisterConfiguration* config_;
   const InstructionSequence* const sequence_;
   Constraints constraints_;
+  ZoneMap<RpoNumber, BlockAssessments*> assessments_;
+  ZoneMap<RpoNumber, DelayedAssessments*> outstanding_assessments_;
 
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
 };
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 82faf75..4683672 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -26,23 +26,22 @@
 }
 
 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
-  return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
-                                  : cfg->num_general_registers();
+  return kind == FP_REGISTERS ? cfg->num_double_registers()
+                              : cfg->num_general_registers();
 }
 
 
 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
                                 RegisterKind kind) {
-  return kind == DOUBLE_REGISTERS
-             ? cfg->num_allocatable_aliased_double_registers()
-             : cfg->num_allocatable_general_registers();
+  return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
+                              : cfg->num_allocatable_general_registers();
 }
 
 
 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
                                        RegisterKind kind) {
-  return kind == DOUBLE_REGISTERS ? cfg->allocatable_double_codes()
-                                  : cfg->allocatable_general_codes();
+  return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
+                              : cfg->allocatable_general_codes();
 }
 
 
@@ -81,7 +80,7 @@
 bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
   for (size_t i = 0; i < instr->OutputCount(); i++) {
     InstructionOperand* output = instr->OutputAt(i);
-    if (output->IsDoubleRegister() &&
+    if (output->IsFPRegister() &&
         LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
       return true;
     }
@@ -351,10 +350,10 @@
     case InstructionOperand::UNALLOCATED:
       return UsePositionHintType::kUnresolved;
     case InstructionOperand::ALLOCATED:
-      if (op.IsRegister() || op.IsDoubleRegister()) {
+      if (op.IsRegister() || op.IsFPRegister()) {
         return UsePositionHintType::kOperand;
       } else {
-        DCHECK(op.IsStackSlot() || op.IsDoubleStackSlot());
+        DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
         return UsePositionHintType::kNone;
       }
     case InstructionOperand::INVALID:
@@ -489,8 +488,7 @@
 
 
 RegisterKind LiveRange::kind() const {
-  return IsFloatingPoint(representation()) ? DOUBLE_REGISTERS
-                                           : GENERAL_REGISTERS;
+  return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
 }
 
 
@@ -728,11 +726,11 @@
     if (!pos->HasOperand()) continue;
     switch (pos->type()) {
       case UsePositionType::kRequiresSlot:
-        DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot());
+        DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
         InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
         break;
       case UsePositionType::kRequiresRegister:
-        DCHECK(op.IsRegister() || op.IsDoubleRegister());
+        DCHECK(op.IsRegister() || op.IsFPRegister());
       // Fall through.
       case UsePositionType::kAny:
         InstructionOperand::ReplaceWith(pos->operand(), &op);
@@ -1400,10 +1398,6 @@
       debug_name_(debug_name),
       config_(config),
       phi_map_(allocation_zone()),
-      allocatable_codes_(this->config()->num_general_registers(), -1,
-                         allocation_zone()),
-      allocatable_double_codes_(this->config()->num_double_registers(), -1,
-                                allocation_zone()),
       live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
       live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
       live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
@@ -1418,10 +1412,6 @@
       assigned_double_registers_(nullptr),
       virtual_register_count_(code->VirtualRegisterCount()),
       preassigned_slot_ranges_(zone) {
-  DCHECK(this->config()->num_general_registers() <=
-         RegisterConfiguration::kMaxGeneralRegisters);
-  DCHECK(this->config()->num_double_registers() <=
-         RegisterConfiguration::kMaxDoubleRegisters);
   assigned_registers_ = new (code_zone())
       BitVector(this->config()->num_general_registers(), code_zone());
   assigned_double_registers_ = new (code_zone())
@@ -1591,7 +1581,7 @@
 
 
 void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
-  if (kind == DOUBLE_REGISTERS) {
+  if (kind == FP_REGISTERS) {
     assigned_double_registers_->Add(index);
   } else {
     DCHECK(kind == GENERAL_REGISTERS);
@@ -1942,7 +1932,7 @@
                                   MachineRepresentation::kFloat64);
     DCHECK(result->IsFixed());
     result->set_assigned_register(index);
-    data()->MarkAllocated(DOUBLE_REGISTERS, index);
+    data()->MarkAllocated(FP_REGISTERS, index);
     data()->fixed_double_live_ranges()[index] = result;
   }
   return result;
@@ -1959,7 +1949,7 @@
   } else if (operand->IsRegister()) {
     return FixedLiveRangeFor(
         LocationOperand::cast(operand)->GetRegister().code());
-  } else if (operand->IsDoubleRegister()) {
+  } else if (operand->IsFPRegister()) {
     return FixedDoubleLiveRangeFor(
         LocationOperand::cast(operand)->GetDoubleRegister().code());
   } else {
@@ -2590,8 +2580,8 @@
 
 const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
     const {
-  return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
-                                    : data()->fixed_live_ranges();
+  return mode() == FP_REGISTERS ? data()->fixed_double_live_ranges()
+                                : data()->fixed_live_ranges();
 }
 
 
@@ -2616,7 +2606,7 @@
   inactive_live_ranges().reserve(8);
   // TryAllocateFreeReg and AllocateBlockedReg assume this
   // when allocating local arrays.
-  DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
+  DCHECK(RegisterConfiguration::kMaxFPRegisters >=
          this->data()->config()->num_general_registers());
 }
 
@@ -2813,7 +2803,7 @@
 
 
 bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
-  LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
+  LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
 
   for (int i = 0; i < num_registers(); i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
@@ -2899,8 +2889,8 @@
     return;
   }
 
-  LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
-  LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
+  LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
+  LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
 
   for (int i = 0; i < num_registers(); i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
@@ -2947,9 +2937,13 @@
   LifetimePosition pos = use_pos[reg];
 
   if (pos < register_use->pos()) {
-    // All registers are blocked before the first use that requires a register.
-    // Spill starting part of live range up to that use.
-    SpillBetween(current, current->Start(), register_use->pos());
+    if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
+                                                   register_use->pos())) {
+      SpillBetween(current, current->Start(), register_use->pos());
+    } else {
+      SetLiveRangeAssignedRegister(current, reg);
+      SplitAndSpillIntersecting(current);
+    }
     return;
   }
 
@@ -2994,6 +2988,8 @@
         // live-ranges: ranges are allocated in order of their start positions,
         // ranges are retired from active/inactive when the start of the
         // current live-range is larger than their end.
+        DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
+                                                          next_pos->pos()));
         SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
       }
       ActiveToHandled(range);
@@ -3092,7 +3088,7 @@
             ? range->TopLevel()->GetSpillRange()
             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
     bool merged = first_op_spill->TryMerge(spill_range);
-    CHECK(merged);
+    if (!merged) return false;
     Spill(range);
     return true;
   } else if (pos->pos() > range->Start().NextStart()) {
@@ -3101,7 +3097,7 @@
             ? range->TopLevel()->GetSpillRange()
             : data()->AssignSpillRangeToLiveRange(range->TopLevel());
     bool merged = first_op_spill->TryMerge(spill_range);
-    CHECK(merged);
+    if (!merged) return false;
     SpillBetween(range, range->Start(), pos->pos());
     DCHECK(UnhandledIsSorted());
     return true;
@@ -3405,7 +3401,8 @@
     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
     BitVector::Iterator iterator(live);
     while (!iterator.Done()) {
-      LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+      int vreg = iterator.Current();
+      LiveRangeBoundArray* array = finder.ArrayFor(vreg);
       for (const RpoNumber& pred : block->predecessors()) {
         FindResult result;
         const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
@@ -3622,6 +3619,7 @@
     worklist.push(iterator.Current());
   }
 
+  ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
   // Seek the deferred blocks that dominate locations requiring spill operands,
   // and spill there. We only need to spill at the start of such blocks.
   BitVector done_blocks(
@@ -3648,10 +3646,15 @@
 
         InstructionOperand pred_op = bound->range_->GetAssignedOperand();
 
-        data()->AddGapMove(spill_block->first_instruction_index(),
-                           Instruction::GapPosition::START, pred_op,
-                           spill_operand);
-        spill_block->mark_needs_frame();
+        RpoNumber spill_block_number = spill_block->rpo_number();
+        if (done_moves.find(std::make_pair(
+                spill_block_number, range->vreg())) == done_moves.end()) {
+          data()->AddGapMove(spill_block->first_instruction_index(),
+                             Instruction::GapPosition::START, pred_op,
+                             spill_operand);
+          done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
+          spill_block->mark_needs_frame();
+        }
       }
     }
   }
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index d6ed005..c67d60e 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -14,11 +14,7 @@
 namespace internal {
 namespace compiler {
 
-enum RegisterKind {
-  GENERAL_REGISTERS,
-  DOUBLE_REGISTERS
-};
-
+enum RegisterKind { GENERAL_REGISTERS, FP_REGISTERS };
 
 // This class represents a single point of a InstructionOperand's lifetime. For
 // each instruction there are four lifetime positions:
@@ -46,6 +42,14 @@
     return LifetimePosition(index * kStep + kHalfStep);
   }
 
+  static bool ExistsGapPositionBetween(LifetimePosition pos1,
+                                       LifetimePosition pos2) {
+    if (pos1 > pos2) std::swap(pos1, pos2);
+    LifetimePosition next(pos1.value_ + 1);
+    if (next.IsGapPosition()) return next < pos2;
+    return next.NextFullStart() < pos2;
+  }
+
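A standalone model of this predicate, assuming the four-positions-per-instruction encoding sketched in the class comment (plain ints stand in for LifetimePosition; instruction i is assumed to occupy values 4*i..4*i+3, the first two being its gap positions):

    #include <cassert>
    #include <utility>

    bool IsGap(int v) { return (v & 2) == 0; }            // gap positions first
    int NextFullStart(int v) { return (v / 4 + 1) * 4; }  // next gap start

    bool ExistsGapPositionBetween(int pos1, int pos2) {
      if (pos1 > pos2) std::swap(pos1, pos2);
      int next = pos1 + 1;
      if (IsGap(next)) return next < pos2;
      return NextFullStart(next) < pos2;
    }

    int main() {
      // From instruction 0's start (value 2) to instruction 1's start
      // (value 6), instruction 1's gap positions (4, 5) lie in between.
      assert(ExistsGapPositionBetween(2, 6));
      // Adjacent positions within one instruction leave no room for a gap.
      assert(!ExistsGapPositionBetween(2, 3));
    }

The allocator change in register-allocator.cc leans on this: SpillBetween is taken only when a gap exists between the range start and the register use, since the connecting move needs a gap position to live in; otherwise the register is assigned outright and intersecting ranges are split and spilled.
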
   // Returns a numeric representation of this lifetime position.
   int value() const { return value_; }
 
@@ -238,11 +242,9 @@
 static const int32_t kUnassignedRegister =
     RegisterConfiguration::kMaxGeneralRegisters;
 
-
-static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxDoubleRegisters,
+static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxFPRegisters,
               "kUnassignedRegister too small");
 
-
 // Representation of a use position.
 class UsePosition final : public ZoneObject {
  public:
@@ -851,8 +853,6 @@
   const char* const debug_name_;
   const RegisterConfiguration* const config_;
   PhiMap phi_map_;
-  ZoneVector<int> allocatable_codes_;
-  ZoneVector<int> allocatable_double_codes_;
   ZoneVector<BitVector*> live_in_sets_;
   ZoneVector<BitVector*> live_out_sets_;
   ZoneVector<TopLevelLiveRange*> live_ranges_;
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index f59c8bc..180355d 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -188,12 +188,14 @@
   // Select the correct X -> Tagged operator.
   const Operator* op;
   if (output_rep == MachineRepresentation::kBit) {
-    op = simplified()->ChangeBitToBool();
+    op = simplified()->ChangeBitToTagged();
   } else if (IsWord(output_rep)) {
-    if (output_type->Is(Type::Unsigned32())) {
-      op = simplified()->ChangeUint32ToTagged();
+    if (output_type->Is(Type::Signed31())) {
+      op = simplified()->ChangeInt31ToTaggedSigned();
     } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeInt32ToTagged();
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = simplified()->ChangeUint32ToTagged();
     } else {
       return TypeError(node, output_rep, output_type,
                        MachineRepresentation::kTagged);
@@ -201,9 +203,24 @@
   } else if (output_rep ==
              MachineRepresentation::kFloat32) {  // float32 -> float64 -> tagged
     node = InsertChangeFloat32ToFloat64(node);
+    // TODO(bmeurer): Pass -0 hint to ChangeFloat64ToTagged.
     op = simplified()->ChangeFloat64ToTagged();
   } else if (output_rep == MachineRepresentation::kFloat64) {
-    op = simplified()->ChangeFloat64ToTagged();
+    if (output_type->Is(Type::Signed31())) {  // float64 -> int32 -> tagged
+      node = InsertChangeFloat64ToInt32(node);
+      op = simplified()->ChangeInt31ToTaggedSigned();
+    } else if (output_type->Is(
+                   Type::Signed32())) {  // float64 -> int32 -> tagged
+      node = InsertChangeFloat64ToInt32(node);
+      op = simplified()->ChangeInt32ToTagged();
+    } else if (output_type->Is(
+                   Type::Unsigned32())) {  // float64 -> uint32 -> tagged
+      node = InsertChangeFloat64ToUint32(node);
+      op = simplified()->ChangeUint32ToTagged();
+    } else {
+      // TODO(bmeurer): Pass -0 hint to ChangeFloat64ToTagged.
+      op = simplified()->ChangeFloat64ToTagged();
+    }
   } else {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kTagged);
@@ -253,7 +270,7 @@
       op = machine()->TruncateFloat64ToFloat32();
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::Number())) {
+    if (output_type->Is(Type::NumberOrUndefined())) {
       op = simplified()
                ->ChangeTaggedToFloat64();  // tagged -> float64 -> float32
       node = jsgraph()->graph()->NewNode(op, node);
@@ -305,7 +322,13 @@
       op = machine()->ChangeUint32ToFloat64();
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::Number())) {
+    if (output_type->Is(Type::Undefined())) {
+      return jsgraph()->Float64Constant(
+          std::numeric_limits<double>::quiet_NaN());
+    } else if (output_type->Is(Type::TaggedSigned())) {
+      node = InsertChangeTaggedSignedToInt32(node);
+      op = machine()->ChangeInt32ToFloat64();
+    } else if (output_type->Is(Type::NumberOrUndefined())) {
       op = simplified()->ChangeTaggedToFloat64();
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
@@ -348,7 +371,7 @@
     } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
     } else if (truncation.TruncatesToWord32()) {
-      op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+      op = machine()->TruncateFloat64ToWord32();
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
@@ -357,16 +380,17 @@
     } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
     } else if (truncation.TruncatesToWord32()) {
-      op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+      op = machine()->TruncateFloat64ToWord32();
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::Unsigned32())) {
+    if (output_type->Is(Type::TaggedSigned())) {
+      op = simplified()->ChangeTaggedSignedToInt32();
+    } else if (output_type->Is(Type::Unsigned32())) {
       op = simplified()->ChangeTaggedToUint32();
     } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
     } else if (truncation.TruncatesToWord32()) {
-      node = InsertChangeTaggedToFloat64(node);
-      op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+      op = simplified()->TruncateTaggedToWord32();
     }
   }
   if (op == nullptr) {
@@ -394,7 +418,7 @@
   // Select the correct X -> Bit operator.
   const Operator* op;
   if (output_rep == MachineRepresentation::kTagged) {
-    op = simplified()->ChangeBoolToBit();
+    op = simplified()->ChangeTaggedToBit();
   } else {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kBit);
@@ -530,6 +554,18 @@
   return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
 }
 
+Node* RepresentationChanger::InsertChangeFloat64ToUint32(Node* node) {
+  return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToUint32(), node);
+}
+
+Node* RepresentationChanger::InsertChangeFloat64ToInt32(Node* node) {
+  return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToInt32(), node);
+}
+
+Node* RepresentationChanger::InsertChangeTaggedSignedToInt32(Node* node) {
+  return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(),
+                                     node);
+}
 
 Node* RepresentationChanger::InsertChangeTaggedToFloat64(Node* node) {
   return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 24e28f3..839335d 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -31,6 +31,9 @@
   bool TruncatesToWord32() const {
     return LessGeneral(kind_, TruncationKind::kWord32);
   }
+  bool TruncatesToFloat64() const {
+    return LessGeneral(kind_, TruncationKind::kFloat64);
+  }
   bool TruncatesNaNToZero() {
     return LessGeneral(kind_, TruncationKind::kWord32) ||
            LessGeneral(kind_, TruncationKind::kBool);
@@ -130,6 +133,9 @@
                   Type* output_type, MachineRepresentation use);
   Node* MakeTruncatedInt32Constant(double value);
   Node* InsertChangeFloat32ToFloat64(Node* node);
+  Node* InsertChangeFloat64ToInt32(Node* node);
+  Node* InsertChangeFloat64ToUint32(Node* node);
+  Node* InsertChangeTaggedSignedToInt32(Node* node);
   Node* InsertChangeTaggedToFloat64(Node* node);
 
   JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index 1d96856..fece596 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -67,8 +67,8 @@
 
   MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
     const size_t index = *first_index;
-    *mode = AddressingModeField::decode(instr_->opcode());
-    switch (*mode) {
+    if (mode) *mode = AddressingModeField::decode(instr_->opcode());
+    switch (AddressingModeField::decode(instr_->opcode())) {
       case kMode_None:
         break;
       case kMode_MRI:
@@ -82,13 +82,14 @@
     return MemOperand(r0);
   }
 
-  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+  MemOperand MemoryOperand(AddressingMode* mode = NULL,
+                           size_t first_index = 0) {
     return MemoryOperand(mode, &first_index);
   }
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
   }
 
@@ -155,7 +156,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                        Register value, Register scratch0, Register scratch1,
@@ -236,15 +238,10 @@
 #if V8_TARGET_ARCH_S390X
         case kS390_Add:
         case kS390_Sub:
-          return lt;
 #endif
         case kS390_AddWithOverflow32:
         case kS390_SubWithOverflow32:
-#if V8_TARGET_ARCH_S390X
-          return ne;
-#else
           return lt;
-#endif
         default:
           break;
       }
@@ -254,15 +251,10 @@
 #if V8_TARGET_ARCH_S390X
         case kS390_Add:
         case kS390_Sub:
-          return ge;
 #endif
         case kS390_AddWithOverflow32:
         case kS390_SubWithOverflow32:
-#if V8_TARGET_ARCH_S390X
-          return eq;
-#else
           return ge;
-#endif
         default:
           break;
       }
@@ -332,16 +324,16 @@
   } while (0)
 
 #if V8_TARGET_ARCH_S390X
-#define ASSEMBLE_ADD_WITH_OVERFLOW32()      \
-  do {                                      \
-    ASSEMBLE_BINOP(AddP, AddP);             \
-    __ TestIfInt32(i.OutputRegister(), r0); \
+#define ASSEMBLE_ADD_WITH_OVERFLOW32()                   \
+  do {                                                   \
+    ASSEMBLE_ADD_WITH_OVERFLOW();                        \
+    __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
   } while (0)
 
-#define ASSEMBLE_SUB_WITH_OVERFLOW32()      \
-  do {                                      \
-    ASSEMBLE_BINOP(SubP, SubP);             \
-    __ TestIfInt32(i.OutputRegister(), r0); \
+#define ASSEMBLE_SUB_WITH_OVERFLOW32()                   \
+  do {                                                   \
+    ASSEMBLE_SUB_WITH_OVERFLOW();                        \
+    __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
   } while (0)
 #else
 #define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
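
On S390X the 32-bit overflow macros now reuse the generic 64-bit ASSEMBLE_ADD/SUB_WITH_OVERFLOW sequence and then sign-extend-and-test kScratchReg (where that macro apparently leaves its overflow indicator), which is why FlagsConditionToCondition above no longer needs the S390X-only ne/eq cases: lt/ge now works for both widths. The underlying idea, widening to 64 bits and checking that the result survives 32-bit sign extension, can be sketched in portable C++; this shows the concept only, not the exact flag semantics of the emitted instructions.

#include <cassert>
#include <cstdint>

bool Add32Overflows(int32_t a, int32_t b) {
  int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  // Overflow iff the 64-bit sum is not its own 32-bit sign extension.
  return wide != static_cast<int64_t>(static_cast<int32_t>(wide));
}

bool Sub32Overflows(int32_t a, int32_t b) {
  int64_t wide = static_cast<int64_t>(a) - static_cast<int64_t>(b);
  return wide != static_cast<int64_t>(static_cast<int32_t>(wide));
}

int main() {
  assert(!Add32Overflows(1, 2));
  assert(Add32Overflows(INT32_MAX, 1));
  assert(Sub32Overflows(INT32_MIN, 1));
}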
@@ -461,7 +453,6 @@
     __ asm_instr(value, operand);                        \
   } while (0)
 
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width)              \
   do {                                                             \
     DoubleRegister result = i.OutputDoubleRegister();              \
@@ -469,7 +460,6 @@
     AddressingMode mode = kMode_None;                              \
     MemOperand operand = i.MemoryOperand(&mode, index);            \
     Register offset = operand.rb();                                \
-    __ lgfr(offset, offset);                                       \
     if (HasRegisterInput(instr, 2)) {                              \
       __ CmpLogical32(offset, i.InputRegister(2));                 \
     } else {                                                       \
@@ -477,11 +467,11 @@
     }                                                              \
     auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
     __ bge(ool->entry());                                          \
+    __ CleanUInt32(offset);                                        \
     __ asm_instr(result, operand);                                 \
     __ bind(ool->exit());                                          \
   } while (0)
 
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
   do {                                                       \
     Register result = i.OutputRegister();                    \
@@ -489,7 +479,6 @@
     AddressingMode mode = kMode_None;                        \
     MemOperand operand = i.MemoryOperand(&mode, index);      \
     Register offset = operand.rb();                          \
-    __ lgfr(offset, offset);                                 \
     if (HasRegisterInput(instr, 2)) {                        \
       __ CmpLogical32(offset, i.InputRegister(2));           \
     } else {                                                 \
@@ -497,11 +486,11 @@
     }                                                        \
     auto ool = new (zone()) OutOfLineLoadZero(this, result); \
     __ bge(ool->entry());                                    \
+    __ CleanUInt32(offset);                                  \
     __ asm_instr(result, operand);                           \
     __ bind(ool->exit());                                    \
   } while (0)
 
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
   do {                                                  \
     Label done;                                         \
@@ -509,7 +498,6 @@
     AddressingMode mode = kMode_None;                   \
     MemOperand operand = i.MemoryOperand(&mode, index); \
     Register offset = operand.rb();                     \
-    __ lgfr(offset, offset);                            \
     if (HasRegisterInput(instr, 2)) {                   \
       __ CmpLogical32(offset, i.InputRegister(2));      \
     } else {                                            \
@@ -517,11 +505,11 @@
     }                                                   \
     __ bge(&done);                                      \
     DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ CleanUInt32(offset);                             \
     __ StoreFloat32(value, operand);                    \
     __ bind(&done);                                     \
   } while (0)
 
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
   do {                                                  \
     Label done;                                         \
@@ -530,7 +518,6 @@
     MemOperand operand = i.MemoryOperand(&mode, index); \
     DCHECK_EQ(kMode_MRR, mode);                         \
     Register offset = operand.rb();                     \
-    __ lgfr(offset, offset);                            \
     if (HasRegisterInput(instr, 2)) {                   \
       __ CmpLogical32(offset, i.InputRegister(2));      \
     } else {                                            \
@@ -538,11 +525,11 @@
     }                                                   \
     __ bge(&done);                                      \
     DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ CleanUInt32(offset);                             \
     __ StoreDouble(value, operand);                     \
     __ bind(&done);                                     \
   } while (0)
 
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)       \
   do {                                                  \
     Label done;                                         \
@@ -550,7 +537,6 @@
     AddressingMode mode = kMode_None;                   \
     MemOperand operand = i.MemoryOperand(&mode, index); \
     Register offset = operand.rb();                     \
-    __ lgfr(offset, offset);                            \
     if (HasRegisterInput(instr, 2)) {                   \
       __ CmpLogical32(offset, i.InputRegister(2));      \
     } else {                                            \
@@ -558,6 +544,7 @@
     }                                                   \
     __ bge(&done);                                      \
     Register value = i.InputRegister(3);                \
+    __ CleanUInt32(offset);                             \
     __ asm_instr(value, operand);                       \
     __ bind(&done);                                     \
   } while (0)
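
All five checked-access macros drop the unconditional lgfr (sign extension) of the offset register, which is what the removed TODO(mbrandy) comments were about, and instead clear the upper 32 bits with CleanUInt32 only on the in-bounds path, immediately before the access. A hedged sketch of the resulting shape, with a plain function standing in for the emitted code:

#include <cstdint>
#include <cstdio>

int32_t CheckedLoadWord32(const int32_t* buffer, uint32_t offset,
                          uint32_t length) {
  // Unsigned 32-bit compare first (CmpLogical32 + bge in the macros).
  if (offset >= length) return 0;  // out-of-line path: produce zero/NaN
  // Zero-extend the offset only on the in-bounds path (CleanUInt32), so
  // stale upper bits can never reach the address computation.
  uint64_t clean_offset = static_cast<uint64_t>(offset);
  return buffer[clean_offset];
}

int main() {
  int32_t buf[4] = {10, 20, 30, 40};
  std::printf("%d %d\n", static_cast<int>(CheckedLoadWord32(buf, 2, 4)),
              static_cast<int>(CheckedLoadWord32(buf, 9, 4)));  // 30 0
}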
@@ -566,8 +553,6 @@
   __ LeaveFrame(StackFrame::MANUAL);
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {}
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -614,7 +599,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   S390OperandConverter i(this, instr);
   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
 
@@ -656,6 +642,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!instr->InputAt(0)->IsImmediate());
+      __ Jump(i.InputRegister(0));
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -735,7 +729,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -1306,7 +1302,7 @@
       break;
 #endif
     case kS390_Push:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ lay(sp, MemOperand(sp, -kDoubleSize));
         __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1318,7 +1314,7 @@
     case kS390_PushFrame: {
       int num_slots = i.InputInt32(1);
       __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ StoreDouble(i.InputDoubleRegister(0),
                        MemOperand(sp));
       } else {
@@ -1329,7 +1325,7 @@
     }
     case kS390_StoreToStackSlot: {
       int slot = i.InputInt32(1);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ StoreDouble(i.InputDoubleRegister(0),
                        MemOperand(sp, slot * kPointerSize));
       } else {
@@ -1555,6 +1551,9 @@
     case kS390_LoadWordS16:
       ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
       break;
+    case kS390_LoadWordU32:
+      ASSEMBLE_LOAD_INTEGER(LoadlW);
+      break;
     case kS390_LoadWordS32:
       ASSEMBLE_LOAD_INTEGER(LoadW);
       break;
@@ -1607,7 +1606,7 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
       break;
     case kCheckedLoadWord32:
-      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
       break;
     case kCheckedLoadWord64:
 #if V8_TARGET_ARCH_S390X
@@ -1644,10 +1643,35 @@
     case kCheckedStoreFloat64:
       ASSEMBLE_CHECKED_STORE_DOUBLE();
       break;
+    case kAtomicLoadInt8:
+      __ LoadB(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kAtomicLoadUint8:
+      __ LoadlB(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kAtomicLoadInt16:
+      __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kAtomicLoadUint16:
+      __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kAtomicLoadWord32:
+      __ LoadlW(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kAtomicStoreWord8:
+      __ StoreByte(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+      break;
+    case kAtomicStoreWord16:
+      __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+      break;
+    case kAtomicStoreWord32:
+      __ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+      break;
     default:
       UNREACHABLE();
       break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
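
AssembleArchInstruction and AssembleDeoptimizerCall now return a CodeGenResult instead of void, so running out of deoptimization entries (kTooManyDeoptimizationBailouts) can abort code generation cleanly instead of crashing. The propagation pattern, sketched with stand-in functions:

enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

// Stand-in for AssembleDeoptimizerCall: fail softly when no deopt entry
// can be allocated (deopt_entry == nullptr in the real code).
CodeGenResult AssembleDeoptimizerCallLike(bool have_entry) {
  if (!have_entry) return kTooManyDeoptimizationBailouts;
  // ... emit the runtime call ...
  return kSuccess;
}

// Stand-in for the kArchDeoptimize case in AssembleArchInstruction.
CodeGenResult AssembleArchInstructionLike(bool have_entry) {
  CodeGenResult result = AssembleDeoptimizerCallLike(have_entry);
  if (result != kSuccess) return result;  // propagate the failure upward
  // ... assemble the remaining cases ...
  return kSuccess;
}

int main() {
  return AssembleArchInstructionLike(true) == kSuccess ? 0 : 1;
}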
 
 // Assembles branches after an instruction.
@@ -1744,7 +1768,7 @@
   S390OperandConverter i(this, instr);
   Register input = i.InputRegister(0);
   for (size_t index = 2; index < instr->InputCount(); index += 2) {
-    __ CmpP(input, Operand(i.InputInt32(index + 0)));
+    __ Cmp32(input, Operand(i.InputInt32(index + 0)));
     __ beq(GetLabel(i.InputRpo(index + 1)));
   }
   AssembleArchJump(i.InputRpo(1));
@@ -1767,17 +1791,41 @@
   __ Jump(kScratchReg);
 }
 
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
 
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::FinishFrame(Frame* frame) {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+
+  // Save callee-saved Double registers.
+  if (double_saves != 0) {
+    frame->AlignSavedCalleeRegisterSlots();
+    DCHECK(kNumCalleeSavedDoubles ==
+           base::bits::CountPopulation32(double_saves));
+    frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+                                            (kDoubleSize / kPointerSize));
+  }
+  // Save callee-saved registers.
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    // Register save area does not include the fp or constant pool pointer.
+    const int num_saves = kNumCalleeSaved - 1;
+    DCHECK(num_saves == base::bits::CountPopulation32(saves));
+    frame->AllocateSavedCalleeRegisterSlots(num_saves);
+  }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
   if (frame_access_state()->has_frame()) {
@@ -1794,7 +1842,7 @@
     }
   }
 
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1805,15 +1853,12 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
   const RegList double_saves = descriptor->CalleeSavedFPRegisters();
-  if (double_saves != 0) {
-    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
-  }
-  if (stack_shrink_slots > 0) {
-    __ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
+  if (shrink_slots > 0) {
+    __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
   }
 
   // Save callee-saved Double registers.
@@ -1821,8 +1866,6 @@
     __ MultiPushDoubles(double_saves);
     DCHECK(kNumCalleeSavedDoubles ==
            base::bits::CountPopulation32(double_saves));
-    frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
-                                              (kDoubleSize / kPointerSize));
   }
 
   // Save callee-saved registers.
@@ -1830,10 +1873,6 @@
   if (saves != 0) {
     __ MultiPush(saves);
     // register save area does not include the fp or constant pool pointer.
-    const int num_saves =
-        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
-    DCHECK(num_saves == base::bits::CountPopulation32(saves));
-    frame()->AllocateSavedCalleeRegisterSlots(num_saves);
   }
 }
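
AssemblePrologue is split in two: FinishFrame does only frame-slot bookkeeping (the AllocateSavedCalleeRegisterSlots calls moved out of the emission path above), while AssembleConstructFrame emits the actual stack adjustment and register pushes. A hedged two-phase sketch with toy types; the slot sizes are illustrative:

struct FrameLike {
  int slot_count = 0;
  void AllocateSavedCalleeRegisterSlots(int n) { slot_count += n; }
};

// Phase 1: pure accounting; runs before any code is emitted, so the
// frame layout is fixed up front.
void FinishFrameLike(FrameLike* frame, int saved_doubles, int saved_gprs) {
  if (saved_doubles > 0)
    frame->AllocateSavedCalleeRegisterSlots(saved_doubles * 2);
  if (saved_gprs > 0) frame->AllocateSavedCalleeRegisterSlots(saved_gprs);
}

// Phase 2: emission only; reads the layout, never changes it.
void AssembleConstructFrameLike(const FrameLike& frame) {
  // e.g. __ lay(sp, MemOperand(sp, -frame.slot_count * kPointerSize));
  (void)frame;
}

int main() {
  FrameLike frame;
  FinishFrameLike(&frame, /*saved_doubles=*/8, /*saved_gprs=*/5);
  AssembleConstructFrameLike(frame);
  return frame.slot_count == 21 ? 0 : 1;
}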
 
@@ -1898,10 +1937,28 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          __ mov(dst, Operand(src.ToInt32()));
+#if V8_TARGET_ARCH_S390X
+          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#else
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#endif
+            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+          } else {
+            __ mov(dst, Operand(src.ToInt32()));
+          }
           break;
         case Constant::kInt64:
+#if V8_TARGET_ARCH_S390X
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
+          } else {
+            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            __ mov(dst, Operand(src.ToInt64()));
+          }
+#else
           __ mov(dst, Operand(src.ToInt64()));
+#endif  // V8_TARGET_ARCH_S390X
           break;
         case Constant::kFloat32:
           __ Move(dst,
@@ -1935,7 +1992,7 @@
         __ StoreP(dst, g.ToMemOperand(destination), r0);
       }
     } else {
-      DoubleRegister dst = destination->IsDoubleRegister()
+      DoubleRegister dst = destination->IsFPRegister()
                                ? g.ToDoubleRegister(destination)
                                : kScratchDoubleReg;
       double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
@@ -1946,23 +2003,23 @@
         __ LoadDoubleLiteral(dst, value, kScratchReg);
       }
 
-      if (destination->IsDoubleStackSlot()) {
+      if (destination->IsFPStackSlot()) {
         __ StoreDouble(dst, g.ToMemOperand(destination));
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     DoubleRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       DoubleRegister dst = g.ToDoubleRegister(destination);
       __ Move(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       __ StoreDouble(src, g.ToMemOperand(destination));
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       __ LoadDouble(g.ToDoubleRegister(destination), src);
     } else {
       DoubleRegister temp = kScratchDoubleReg;
@@ -1996,7 +2053,7 @@
       __ StoreP(temp, dst);
     }
 #if V8_TARGET_ARCH_S390X
-  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
 #else
   } else if (source->IsStackSlot()) {
     DCHECK(destination->IsStackSlot());
@@ -2009,24 +2066,24 @@
     __ LoadP(temp_1, dst);
     __ StoreP(temp_0, dst);
     __ StoreP(temp_1, src);
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     DoubleRegister temp = kScratchDoubleReg;
     DoubleRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       DoubleRegister dst = g.ToDoubleRegister(destination);
       __ ldr(temp, src);
       __ ldr(src, dst);
       __ ldr(dst, temp);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
       __ ldr(temp, src);
       __ LoadDouble(src, dst);
       __ StoreDouble(temp, dst);
     }
 #if !V8_TARGET_ARCH_S390X
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPStackSlot());
     DoubleRegister temp_0 = kScratchDoubleReg;
     DoubleRegister temp_1 = d0;
     MemOperand src = g.ToMemOperand(source);
@@ -2049,10 +2106,6 @@
   }
 }
 
-void CodeGenerator::AddNopForSmiCodeInlining() {
-  // We do not insert nops for inlined Smi code.
-}
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
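
The constant-move hunk above threads a constant's relocation mode (rmode) through to the mov when it refers to wasm memory or the wasm memory size, so the embedded value is recorded and can be patched later, for example when the wasm heap is relocated or grown. A toy model of recording relocations at emission time; the types are illustrative, not V8's RelocInfo machinery:

#include <cstddef>
#include <cstdint>
#include <vector>

enum RelocMode { kNoReloc, kWasmMemoryReference, kWasmMemorySizeReference };

struct RelocEntry {
  size_t pc_offset;  // where in the code buffer the immediate lives
  RelocMode mode;
};

struct AssemblerLike {
  std::vector<uint8_t> code;
  std::vector<RelocEntry> reloc;

  void EmitImmediate64(uint64_t imm, RelocMode mode) {
    // Patchable immediates get a relocation record; plain ones do not.
    if (mode != kNoReloc) reloc.push_back({code.size(), mode});
    for (int i = 0; i < 8; ++i) code.push_back(uint8_t(imm >> (8 * i)));
  }
};

int main() {
  AssemblerLike masm;
  masm.EmitImmediate64(0x1234, kWasmMemoryReference);  // recorded
  masm.EmitImmediate64(0x5678, kNoReloc);              // not recorded
  return masm.reloc.size() == 1 ? 0 : 1;
}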
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
index a32f875..a54b2ed 100644
--- a/src/compiler/s390/instruction-codes-s390.h
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -126,6 +126,7 @@
   V(S390_LoadWordS16)              \
   V(S390_LoadWordU16)              \
   V(S390_LoadWordS32)              \
+  V(S390_LoadWordU32)              \
   V(S390_LoadWord64)               \
   V(S390_LoadFloat32)              \
   V(S390_LoadDouble)               \
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
index 2d98e11..d187227 100644
--- a/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -126,6 +126,7 @@
     case kS390_LoadWordS16:
     case kS390_LoadWordU16:
     case kS390_LoadWordS32:
+    case kS390_LoadWordU32:
     case kS390_LoadWord64:
     case kS390_LoadFloat32:
     case kS390_LoadDouble:
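
The new kS390_LoadWordU32 opcode (declared above and added to the scheduler's load cases here) is emitted with LoadlW, a zero-extending 32-bit load, in contrast to the sign-extending LoadW behind kS390_LoadWordS32; the instruction selector below switches plain kWord32 loads over to it, matching the kCheckedLoadWord32 change earlier. The difference, sketched portably (the instruction mapping is as the macro-assembler names suggest, roughly llgf vs. lgf on 64-bit):

#include <cassert>
#include <cstdint>
#include <cstring>

uint64_t LoadWordS32Like(const void* p) {  // sign-extending (LoadW-style)
  int32_t v;
  std::memcpy(&v, p, 4);
  return static_cast<uint64_t>(static_cast<int64_t>(v));
}

uint64_t LoadWordU32Like(const void* p) {  // zero-extending (LoadlW-style)
  uint32_t v;
  std::memcpy(&v, p, 4);
  return static_cast<uint64_t>(v);
}

int main() {
  uint32_t word = 0x80000000u;  // high bit set
  assert(LoadWordS32Like(&word) == 0xFFFFFFFF80000000ull);
  assert(LoadWordU32Like(&word) == 0x0000000080000000ull);
}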
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index 8a4af5e..00782d1 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -182,11 +182,7 @@
     case MachineRepresentation::kTagged:  // Fall through.
 #endif
     case MachineRepresentation::kWord32:
-      opcode = kS390_LoadWordS32;
-#if V8_TARGET_ARCH_S390X
-      // TODO(john.yan): Remove this mode since s390 does not have this
-      // restriction.
-      mode = kInt16Imm_4ByteAligned;
-#endif
+      opcode = kS390_LoadWordU32;
       break;
 #if V8_TARGET_ARCH_S390X
     case MachineRepresentation::kTagged:  // Fall through.
@@ -1042,14 +1038,12 @@
   VisitRR(this, kS390_DoubleToFloat32, node);
 }
 
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, kArchTruncateDoubleToI, node);
-    case TruncationMode::kRoundToZero:
-      return VisitRR(this, kS390_DoubleToInt32, node);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRR(this, kS390_DoubleToInt32, node);
 }
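
VisitTruncateFloat64ToInt32 and its TruncationMode parameter are gone; the two behaviours become distinct operators. TruncateFloat64ToWord32 (kArchTruncateDoubleToI) follows JavaScript's ToInt32-style truncation, while RoundFloat64ToInt32 is a plain round-toward-zero conversion. Their semantic difference, sketched portably:

#include <cassert>
#include <cmath>
#include <cstdint>

int32_t TruncateFloat64ToWord32Like(double d) {
  if (!std::isfinite(d)) return 0;         // NaN/Inf -> 0
  double t = std::trunc(d);
  double m = std::fmod(t, 4294967296.0);   // wrap modulo 2^32
  // Two's-complement reinterpretation of the low 32 bits.
  return static_cast<int32_t>(static_cast<uint32_t>(
      static_cast<uint64_t>(static_cast<int64_t>(m))));
}

int32_t RoundFloat64ToInt32Like(double d) {
  return static_cast<int32_t>(d);          // assumes |d| fits in int32
}

int main() {
  assert(TruncateFloat64ToWord32Like(4294967296.0 + 5.0) == 5);
  assert(RoundFloat64ToInt32Like(-3.7) == -3);
}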
 
 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
@@ -1123,6 +1117,11 @@
   VisitRRR(this, kS390_SubFloat, node);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  S390OperandGenerator g(this);
+  VisitRRR(this, kS390_SubFloat, node);
+}
+
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   // TODO(mbrandy): detect multiply-subtract
   S390OperandGenerator g(this);
@@ -1148,6 +1147,10 @@
   VisitRRR(this, kS390_SubDouble, node);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitRRR(this, kS390_SubDouble, node);
+}
+
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kS390_MulFloat, node);
 }
@@ -1750,6 +1753,61 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicLoadWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode | AddressingModeField::encode(kMode_MRR),
+       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kAtomicStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kAtomicStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kAtomicStoreWord32;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  inputs[input_count++] = g.UseUniqueRegister(index);
+  Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
+       inputs);
+}
+
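
Both atomic visitors follow the same shape: map the machine representation (and, for loads, the signedness) to a width-specific opcode, then emit with base+index (kMode_MRR) addressing; stores additionally pin value, base and index in unique registers. The dispatch half, as a standalone sketch with stand-in enums:

enum ArchOpcodeLike {
  kNopLike,
  kAtomicLoadInt8Like, kAtomicLoadUint8Like,
  kAtomicLoadInt16Like, kAtomicLoadUint16Like,
  kAtomicLoadWord32Like,
};

enum class RepLike { kWord8, kWord16, kWord32 };

ArchOpcodeLike SelectAtomicLoad(RepLike rep, bool is_signed) {
  switch (rep) {
    case RepLike::kWord8:
      return is_signed ? kAtomicLoadInt8Like : kAtomicLoadUint8Like;
    case RepLike::kWord16:
      return is_signed ? kAtomicLoadInt16Like : kAtomicLoadUint16Like;
    case RepLike::kWord32:
      return kAtomicLoadWord32Like;  // word32 needs no sign distinction
  }
  return kNopLike;  // unreachable for the representations above
}

int main() {
  return SelectAtomicLoad(RepLike::kWord16, /*is_signed=*/false) ==
                 kAtomicLoadUint16Like
             ? 0
             : 1;
}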
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index 4ac65e5..6bd1a17 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -199,11 +199,28 @@
   AddSuccessor(block, succ);
 }
 
+#if DEBUG
+namespace {
+
+bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
+  switch (opcode) {
+#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+    JS_OP_LIST(BUILD_BLOCK_JS_CASE)
+#undef BUILD_BLOCK_JS_CASE
+    case IrOpcode::kCall:
+      return true;
+    default:
+      return false;
+  }
+}
+
+}  // namespace
+#endif  // DEBUG
 
 void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
                        BasicBlock* exception_block) {
   DCHECK_EQ(BasicBlock::kNone, block->control());
-  DCHECK_EQ(IrOpcode::kCall, call->opcode());
+  DCHECK(IsPotentiallyThrowingCall(call->opcode()));
   block->set_control(BasicBlock::kCall);
   AddSuccessor(block, success_block);
   AddSuccessor(block, exception_block);
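
AddCall's DCHECK is relaxed from "must be kCall" to "must be a potentially throwing call", where the predicate is generated with the JS_OP_LIST X-macro; the scheduler changes below use the same expansion, so JS operators get success/exception successor blocks exactly like calls. The X-macro case-label trick, in a self-contained sketch (the opcode list here is a tiny illustrative subset):

#include <cassert>

// Illustrative stand-in for V8's JS_OP_LIST X-macro.
#define JS_OP_LIST(V) V(JSAdd) V(JSToNumber) V(JSCallFunction)

enum Opcode {
#define DECLARE(Name) k##Name,
  JS_OP_LIST(DECLARE)
#undef DECLARE
  kCall,
  kInt32Add,
};

bool IsPotentiallyThrowingCall(Opcode opcode) {
  switch (opcode) {
#define CASE(Name) case k##Name:
    JS_OP_LIST(CASE)
#undef CASE
    case kCall:
      return true;
    default:
      return false;
  }
}

int main() {
  assert(IsPotentiallyThrowingCall(kJSToNumber));
  assert(!IsPotentiallyThrowingCall(kInt32Add));
}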
@@ -298,41 +315,87 @@
   SetControlInput(block, sw);
 }
 
-void Schedule::EnsureSplitEdgeForm() {
+void Schedule::EnsureCFGWellFormedness() {
   // Make a copy of all the blocks for the iteration, since adding the split
   // edges will allocate new blocks.
   BasicBlockVector all_blocks_copy(all_blocks_);
 
   // Insert missing split edge blocks.
   for (auto block : all_blocks_copy) {
-    if (block->PredecessorCount() > 1 && block != end_) {
-      for (auto current_pred = block->predecessors().begin();
-           current_pred != block->predecessors().end(); ++current_pred) {
-        BasicBlock* pred = *current_pred;
-        if (pred->SuccessorCount() > 1) {
-          // Found a predecessor block with multiple successors.
-          BasicBlock* split_edge_block = NewBasicBlock();
-          split_edge_block->set_control(BasicBlock::kGoto);
-          split_edge_block->successors().push_back(block);
-          split_edge_block->predecessors().push_back(pred);
-          split_edge_block->set_deferred(pred->deferred());
-          *current_pred = split_edge_block;
-          // Find a corresponding successor in the previous block, replace it
-          // with the split edge block... but only do it once, since we only
-          // replace the previous blocks in the current block one at a time.
-          for (auto successor = pred->successors().begin();
-               successor != pred->successors().end(); ++successor) {
-            if (*successor == block) {
-              *successor = split_edge_block;
-              break;
-            }
-          }
+    if (block->PredecessorCount() > 1) {
+      if (block != end_) {
+        EnsureSplitEdgeForm(block);
+      }
+      if (block->deferred()) {
+        EnsureDeferredCodeSingleEntryPoint(block);
+      }
+    }
+  }
+}
+
+void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
+  DCHECK(block->PredecessorCount() > 1 && block != end_);
+  for (auto current_pred = block->predecessors().begin();
+       current_pred != block->predecessors().end(); ++current_pred) {
+    BasicBlock* pred = *current_pred;
+    if (pred->SuccessorCount() > 1) {
+      // Found a predecessor block with multiple successors.
+      BasicBlock* split_edge_block = NewBasicBlock();
+      split_edge_block->set_control(BasicBlock::kGoto);
+      split_edge_block->successors().push_back(block);
+      split_edge_block->predecessors().push_back(pred);
+      split_edge_block->set_deferred(pred->deferred());
+      *current_pred = split_edge_block;
+      // Find a corresponding successor in the previous block, replace it
+      // with the split edge block... but only do it once, since we only
+      // replace the previous blocks in the current block one at a time.
+      for (auto successor = pred->successors().begin();
+           successor != pred->successors().end(); ++successor) {
+        if (*successor == block) {
+          *successor = split_edge_block;
+          break;
         }
       }
     }
   }
 }
 
+void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
+  // If a deferred block has multiple predecessors, they have to
+  // all be deferred. Otherwise, we can run into a situation where a range
+  // that spills only in deferred blocks inserts its spill in the block, but
+  // other ranges need moves inserted by ResolveControlFlow in the predecessors,
+  // which may clobber the register of this range.
+  // To ensure that, when a deferred block has multiple predecessors, and some
+  // are not deferred, we add a non-deferred block to collect all such edges.
+
+  DCHECK(block->deferred() && block->PredecessorCount() > 1);
+  bool all_deferred = true;
+  for (auto current_pred = block->predecessors().begin();
+       current_pred != block->predecessors().end(); ++current_pred) {
+    BasicBlock* pred = *current_pred;
+    if (!pred->deferred()) {
+      all_deferred = false;
+      break;
+    }
+  }
+
+  if (all_deferred) return;
+  BasicBlock* merger = NewBasicBlock();
+  merger->set_control(BasicBlock::kGoto);
+  merger->successors().push_back(block);
+  for (auto current_pred = block->predecessors().begin();
+       current_pred != block->predecessors().end(); ++current_pred) {
+    BasicBlock* pred = *current_pred;
+    merger->predecessors().push_back(pred);
+    pred->successors().clear();
+    pred->successors().push_back(merger);
+  }
+  merger->set_deferred(false);
+  block->predecessors().clear();
+  block->predecessors().push_back(merger);
+}
+
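
In graph terms, EnsureDeferredCodeSingleEntryPoint rewrites a deferred block's mixed predecessor set to route through one fresh non-deferred goto block. A hedged sketch on a toy CFG type; it mirrors the code above, including the assumption (guaranteed by the split-edge pass) that each predecessor's only successor is the block being fixed:

#include <vector>

struct BlockLike {
  bool deferred = false;
  std::vector<BlockLike*> preds, succs;
};

void EnsureSingleEntry(BlockLike* block, std::vector<BlockLike*>* all) {
  bool all_deferred = true;
  for (BlockLike* p : block->preds)
    if (!p->deferred) { all_deferred = false; break; }
  if (all_deferred) return;  // no hot->deferred edges to merge

  BlockLike* merger = new BlockLike();  // collector block, ends in a goto
  merger->deferred = false;             // mirrors set_deferred(false)
  all->push_back(merger);
  merger->succs.push_back(block);
  for (BlockLike* p : block->preds) {
    merger->preds.push_back(p);
    p->succs.clear();  // assumes |block| was p's only successor
    p->succs.push_back(merger);
  }
  block->preds.assign(1, merger);
}

int main() {
  std::vector<BlockLike*> all;
  BlockLike hot, cold, target;
  cold.deferred = true;
  target.deferred = true;
  hot.succs.push_back(&target);
  cold.succs.push_back(&target);
  target.preds = {&hot, &cold};
  all = {&hot, &cold, &target};
  EnsureSingleEntry(&target, &all);
  return target.preds.size() == 1 && !target.preds[0]->deferred ? 0 : 1;
}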
 void Schedule::PropagateDeferredMark() {
   // Push forward the deferred block marks through newly inserted blocks and
   // other improperly marked blocks until a fixed point is reached.
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index c99a0fc..74ba835 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -257,8 +257,12 @@
   friend class BasicBlockInstrumentor;
   friend class RawMachineAssembler;
 
+  // Ensure properties of the CFG assumed by further stages.
+  void EnsureCFGWellFormedness();
   // Ensure split-edge form for a hand-assembled schedule.
-  void EnsureSplitEdgeForm();
+  void EnsureSplitEdgeForm(BasicBlock* block);
+  // Ensure entry into a deferred block happens from a single hot block.
+  void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block);
+  // Copy deferred block markers down as far as possible.
   void PropagateDeferredMark();
 
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index b04ba6f..58c01cc 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -324,6 +324,10 @@
       case IrOpcode::kSwitch:
         BuildBlocksForSuccessors(node);
         break;
+#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+        JS_OP_LIST(BUILD_BLOCK_JS_CASE)
+// JS opcodes are just like calls => fall through.
+#undef BUILD_BLOCK_JS_CASE
       case IrOpcode::kCall:
         if (NodeProperties::IsExceptionalCall(node)) {
           BuildBlocksForSuccessors(node);
@@ -364,6 +368,10 @@
         scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectThrow(node);
         break;
+#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+        JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
+// JS opcodes are just like calls => fall through.
+#undef CONNECT_BLOCK_JS_CASE
       case IrOpcode::kCall:
         if (NodeProperties::IsExceptionalCall(node)) {
           scheduler_->UpdatePlacement(node, Scheduler::kFixed);
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 88931f5..a76d3e2 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -6,8 +6,10 @@
 
 #include <limits>
 
+#include "src/address-map.h"
 #include "src/base/bits.h"
 #include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/diamond.h"
 #include "src/compiler/linkage.h"
@@ -17,6 +19,7 @@
 #include "src/compiler/representation-change.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/source-position.h"
+#include "src/conversions-inl.h"
 #include "src/objects.h"
 #include "src/type-cache.h"
 
@@ -84,10 +87,10 @@
   static UseInfo Bool() {
     return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
   }
-  static UseInfo Float32() {
+  static UseInfo TruncatingFloat32() {
     return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
   }
-  static UseInfo Float64() {
+  static UseInfo TruncatingFloat64() {
     return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
   }
   static UseInfo PointerInt() {
@@ -122,15 +125,15 @@
     case MachineRepresentation::kTagged:
       return UseInfo::AnyTagged();
     case MachineRepresentation::kFloat64:
-    return UseInfo::Float64();
+      return UseInfo::TruncatingFloat64();
     case MachineRepresentation::kFloat32:
-      return UseInfo::Float32();
+      return UseInfo::TruncatingFloat32();
     case MachineRepresentation::kWord64:
-    return UseInfo::TruncatingWord64();
+      return UseInfo::TruncatingWord64();
     case MachineRepresentation::kWord8:
     case MachineRepresentation::kWord16:
     case MachineRepresentation::kWord32:
-    return UseInfo::TruncatingWord32();
+      return UseInfo::TruncatingWord32();
     case MachineRepresentation::kBit:
       return UseInfo::Bool();
     case MachineRepresentation::kSimd128:  // Fall through.
@@ -511,7 +514,8 @@
 
   // Helpers for specific types of binops.
   void VisitFloat64Binop(Node* node) {
-    VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+    VisitBinop(node, UseInfo::TruncatingFloat64(),
+               MachineRepresentation::kFloat64);
   }
   void VisitInt32Binop(Node* node) {
     VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -534,7 +538,7 @@
                MachineRepresentation::kWord64);
   }
   void VisitFloat64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kBit);
+    VisitBinop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kBit);
   }
   void VisitInt32Cmp(Node* node) {
     VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
@@ -563,6 +567,8 @@
       return MachineRepresentation::kBit;
     } else if (type->Is(Type::Number())) {
       return MachineRepresentation::kFloat64;
+    } else if (use.TruncatesToFloat64()) {
+      return MachineRepresentation::kFloat64;
     } else if (type->Is(Type::Internal())) {
       // We mark (u)int64 as Type::Internal.
       // TODO(jarin) This is a workaround for our lack of (u)int64
@@ -630,7 +636,7 @@
   }
 
   void VisitCall(Node* node, SimplifiedLowering* lowering) {
-    const CallDescriptor* desc = OpParameter<const CallDescriptor*>(node->op());
+    const CallDescriptor* desc = CallDescriptorOf(node->op());
     const MachineSignature* sig = desc->GetMachineSignature();
     int params = static_cast<int>(sig->parameter_count());
     // Propagate representation information from call descriptor.
@@ -705,6 +711,71 @@
     return changer_->Float64OperatorFor(node->opcode());
   }
 
+  WriteBarrierKind WriteBarrierKindFor(
+      BaseTaggedness base_taggedness,
+      MachineRepresentation field_representation, Type* field_type,
+      Node* value) {
+    if (base_taggedness == kTaggedBase &&
+        field_representation == MachineRepresentation::kTagged) {
+      Type* value_type = NodeProperties::GetType(value);
+      if (field_type->Is(Type::TaggedSigned()) ||
+          value_type->Is(Type::TaggedSigned())) {
+        // Write barriers are only for stores of heap objects.
+        return kNoWriteBarrier;
+      }
+      if (field_type->Is(Type::BooleanOrNullOrUndefined()) ||
+          value_type->Is(Type::BooleanOrNullOrUndefined())) {
+        // Write barriers are not necessary when storing true, false, null or
+        // undefined, because these special oddballs are always in the root set.
+        return kNoWriteBarrier;
+      }
+      if (value_type->IsConstant() &&
+          value_type->AsConstant()->Value()->IsHeapObject()) {
+        Handle<HeapObject> value_object =
+            Handle<HeapObject>::cast(value_type->AsConstant()->Value());
+        RootIndexMap root_index_map(jsgraph_->isolate());
+        int root_index = root_index_map.Lookup(*value_object);
+        if (root_index != RootIndexMap::kInvalidRootIndex &&
+            jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
+          // Write barriers are unnecessary for immortal immovable roots.
+          return kNoWriteBarrier;
+        }
+        if (value_object->IsMap()) {
+          // Write barriers for storing maps are cheaper.
+          return kMapWriteBarrier;
+        }
+      }
+      if (field_type->Is(Type::TaggedPointer()) ||
+          value_type->Is(Type::TaggedPointer())) {
+        // Write barriers for heap objects are cheaper.
+        return kPointerWriteBarrier;
+      }
+      NumberMatcher m(value);
+      if (m.HasValue()) {
+        if (IsSmiDouble(m.Value())) {
+          // Storing a smi doesn't need a write barrier.
+          return kNoWriteBarrier;
+        }
+        // The NumberConstant will be represented as HeapNumber.
+        return kPointerWriteBarrier;
+      }
+      return kFullWriteBarrier;
+    }
+    return kNoWriteBarrier;
+  }
+
+  WriteBarrierKind WriteBarrierKindFor(
+      BaseTaggedness base_taggedness,
+      MachineRepresentation field_representation, int field_offset,
+      Type* field_type, Node* value) {
+    if (base_taggedness == kTaggedBase &&
+        field_offset == HeapObject::kMapOffset) {
+      return kMapWriteBarrier;
+    }
+    return WriteBarrierKindFor(base_taggedness, field_representation,
+                               field_type, value);
+  }
+
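
The two WriteBarrierKindFor overloads form an elision ladder: map stores always take the cheap map barrier; otherwise the static type of the stored value decides how much of the full barrier can be skipped. Condensed into a toy decision function (the predicates are stand-ins for the type tests above):

enum BarrierKind {
  kNoBarrier,       // smis, oddballs, immortal immovable roots
  kMapBarrier,      // the stored value is a map
  kPointerBarrier,  // definitely a heap object, but an ordinary one
  kFullBarrier,     // nothing is known statically
};

struct StoredValueFacts {
  bool is_smi;            // tagged-signed: no pointer is stored at all
  bool is_oddball;        // true/false/null/undefined live in the root set
  bool is_immortal_root;  // immortal immovable root constant
  bool is_map;
  bool is_heap_pointer;   // proven TaggedPointer
};

BarrierKind ChooseBarrier(const StoredValueFacts& v) {
  if (v.is_smi || v.is_oddball || v.is_immortal_root) return kNoBarrier;
  if (v.is_map) return kMapBarrier;
  if (v.is_heap_pointer) return kPointerBarrier;
  return kFullBarrier;
}

int main() {
  StoredValueFacts smi_store{true, false, false, false, false};
  return ChooseBarrier(smi_store) == kNoBarrier ? 0 : 1;
}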
   // Dispatching routine for visiting the node {node} with the usage {use}.
   // Depending on the operator, propagate new usage info to the inputs.
   void VisitNode(Node* node, Truncation truncation,
@@ -758,19 +829,23 @@
       case IrOpcode::kCall:
         return VisitCall(node, lowering);
 
-//------------------------------------------------------------------
-// JavaScript operators.
-//------------------------------------------------------------------
-// For now, we assume that all JS operators were too complex to lower
-// to Simplified and that they will always require tagged value inputs
-// and produce tagged value outputs.
-// TODO(turbofan): it might be possible to lower some JSOperators here,
-// but that responsibility really lies in the typed lowering phase.
-#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
-        JS_OP_LIST(DEFINE_JS_CASE)
-#undef DEFINE_JS_CASE
+      //------------------------------------------------------------------
+      // JavaScript operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kJSToNumber: {
         VisitInputs(node);
-        return SetOutput(node, MachineRepresentation::kTagged);
+        // TODO(bmeurer): Optimize somewhat based on input type?
+        if (truncation.TruncatesToWord32()) {
+          SetOutput(node, MachineRepresentation::kWord32);
+          if (lower()) lowering->DoJSToNumberTruncatesToWord32(node, this);
+        } else if (truncation.TruncatesToFloat64()) {
+          SetOutput(node, MachineRepresentation::kFloat64);
+          if (lower()) lowering->DoJSToNumberTruncatesToFloat64(node, this);
+        } else {
+          SetOutput(node, MachineRepresentation::kTagged);
+        }
+        break;
+      }
 
       //------------------------------------------------------------------
       // Simplified operators.
@@ -962,35 +1037,39 @@
         }
         break;
       }
-      case IrOpcode::kNumberImul: {
-        VisitBinop(node, UseInfo::TruncatingWord32(),
-                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
-        if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
-        break;
-      }
       case IrOpcode::kNumberClz32: {
         VisitUnop(node, UseInfo::TruncatingWord32(),
                   MachineRepresentation::kWord32);
         if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
         break;
       }
+      case IrOpcode::kNumberImul: {
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+        if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+        break;
+      }
       case IrOpcode::kNumberCeil: {
-        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
         break;
       }
       case IrOpcode::kNumberFloor: {
-        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
         break;
       }
       case IrOpcode::kNumberRound: {
-        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Round(node));
         break;
       }
       case IrOpcode::kNumberTrunc: {
-        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kFloat64);
         if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
         break;
       }
@@ -1009,7 +1088,8 @@
         break;
       }
       case IrOpcode::kNumberIsHoleNaN: {
-        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kBit);
+        VisitUnop(node, UseInfo::TruncatingFloat64(),
+                  MachineRepresentation::kBit);
         if (lower()) {
           // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
           //                                   #HoleNaNLower32)
@@ -1023,23 +1103,6 @@
         }
         break;
       }
-      case IrOpcode::kPlainPrimitiveToNumber: {
-        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
-        if (lower()) {
-          // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
-          Operator::Properties properties = node->op()->properties();
-          Callable callable = CodeFactory::ToNumber(jsgraph_->isolate());
-          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
-              flags, properties);
-          node->InsertInput(jsgraph_->zone(), 0,
-                            jsgraph_->HeapConstant(callable.code()));
-          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
-          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
-        }
-        break;
-      }
       case IrOpcode::kReferenceEqual: {
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         if (lower()) {
@@ -1051,7 +1114,8 @@
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
-          Operator::Properties properties = node->op()->properties();
+          Operator::Properties properties =
+              Operator::kCommutative | Operator::kNoThrow;
           Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
           CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1059,7 +1123,9 @@
               flags, properties);
           node->InsertInput(jsgraph_->zone(), 0,
                             jsgraph_->HeapConstant(callable.code()));
-          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
         break;
@@ -1068,7 +1134,7 @@
         VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
-          Operator::Properties properties = node->op()->properties();
+          Operator::Properties properties = Operator::kNoThrow;
           Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
           CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1076,7 +1142,9 @@
               flags, properties);
           node->InsertInput(jsgraph_->zone(), 0,
                             jsgraph_->HeapConstant(callable.code()));
-          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
         break;
@@ -1086,7 +1154,7 @@
         if (lower()) {
           // StringLessThanOrEqual(x, y)
           //   => Call(StringLessThanOrEqualStub, x, y, no-context)
-          Operator::Properties properties = node->op()->properties();
+          Operator::Properties properties = Operator::kNoThrow;
           Callable callable =
               CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
@@ -1095,7 +1163,9 @@
               flags, properties);
           node->InsertInput(jsgraph_->zone(), 0,
                             jsgraph_->HeapConstant(callable.code()));
-          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
         break;
@@ -1104,7 +1174,7 @@
         VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // StringToNumber(x) => Call(StringToNumberStub, x, no-context)
-          Operator::Properties properties = node->op()->properties();
+          Operator::Properties properties = Operator::kNoThrow;
           Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
           CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
           CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1113,12 +1183,14 @@
           node->InsertInput(jsgraph_->zone(), 0,
                             jsgraph_->HeapConstant(callable.code()));
           node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+          node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
           NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
         }
         break;
       }
       case IrOpcode::kAllocate: {
-        ProcessInput(node, 0, UseInfo::AnyTagged());
+        ProcessInput(node, 0, UseInfo::TruncatingWord32());
         ProcessRemainingInputs(node, 1);
         SetOutput(node, MachineRepresentation::kTagged);
         break;
@@ -1137,6 +1209,16 @@
                                   access.machine_type.representation()));
         ProcessRemainingInputs(node, 2);
         SetOutput(node, MachineRepresentation::kNone);
+        if (lower()) {
+          WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+              access.base_is_tagged, access.machine_type.representation(),
+              access.offset, access.type, node->InputAt(1));
+          if (write_barrier_kind < access.write_barrier_kind) {
+            access.write_barrier_kind = write_barrier_kind;
+            NodeProperties::ChangeOp(
+                node, jsgraph_->simplified()->StoreField(access));
+          }
+        }
         break;
       }
       case IrOpcode::kLoadBuffer: {
@@ -1160,18 +1242,10 @@
                 MachineRepresentation::kFloat32) {
               output = access.machine_type().representation();
             } else {
-              if (access.machine_type().representation() !=
-                  MachineRepresentation::kFloat64) {
-                // TODO(bmeurer): See comment on abort_compilation_.
-                if (lower()) lowering->abort_compilation_ = true;
-              }
               output = MachineRepresentation::kFloat64;
             }
           }
         } else {
-          // TODO(bmeurer): See comment on abort_compilation_.
-          if (lower()) lowering->abort_compilation_ = true;
-
           // If undefined is not truncated away, we need to have the tagged
           // representation.
           output = MachineRepresentation::kTagged;
@@ -1210,11 +1284,23 @@
                          access.machine_type.representation()));  // value
         ProcessRemainingInputs(node, 3);
         SetOutput(node, MachineRepresentation::kNone);
+        if (lower()) {
+          WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+              access.base_is_tagged, access.machine_type.representation(),
+              access.type, node->InputAt(2));
+          if (write_barrier_kind < access.write_barrier_kind) {
+            access.write_barrier_kind = write_barrier_kind;
+            NodeProperties::ChangeOp(
+                node, jsgraph_->simplified()->StoreElement(access));
+          }
+        }
         break;
       }
+      case IrOpcode::kObjectIsCallable:
       case IrOpcode::kObjectIsNumber:
       case IrOpcode::kObjectIsReceiver:
       case IrOpcode::kObjectIsSmi:
+      case IrOpcode::kObjectIsString:
       case IrOpcode::kObjectIsUndetectable: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         SetOutput(node, MachineRepresentation::kBit);
@@ -1323,14 +1409,15 @@
         return VisitUnop(node, UseInfo::TruncatingWord32(),
                          MachineRepresentation::kWord64);
       case IrOpcode::kTruncateFloat64ToFloat32:
-        return VisitUnop(node, UseInfo::Float64(),
+        return VisitUnop(node, UseInfo::TruncatingFloat64(),
                          MachineRepresentation::kFloat32);
-      case IrOpcode::kTruncateFloat64ToInt32:
-        return VisitUnop(node, UseInfo::Float64(),
+      case IrOpcode::kTruncateFloat64ToWord32:
+        return VisitUnop(node, UseInfo::TruncatingFloat64(),
                          MachineRepresentation::kWord32);
 
       case IrOpcode::kChangeFloat32ToFloat64:
-        return VisitUnop(node, UseInfo::Float32(),
+        UNREACHABLE();
+        return VisitUnop(node, UseInfo::TruncatingFloat32(),
                          MachineRepresentation::kFloat64);
       case IrOpcode::kChangeInt32ToFloat64:
         return VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -1351,7 +1438,7 @@
       case IrOpcode::kFloat64RoundTruncate:
       case IrOpcode::kFloat64RoundTiesAway:
       case IrOpcode::kFloat64RoundUp:
-        return VisitUnop(node, UseInfo::Float64(),
+        return VisitUnop(node, UseInfo::TruncatingFloat64(),
                          MachineRepresentation::kFloat64);
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
@@ -1359,11 +1446,12 @@
         return VisitFloat64Cmp(node);
       case IrOpcode::kFloat64ExtractLowWord32:
       case IrOpcode::kFloat64ExtractHighWord32:
-        return VisitUnop(node, UseInfo::Float64(),
+        return VisitUnop(node, UseInfo::TruncatingFloat64(),
                          MachineRepresentation::kWord32);
       case IrOpcode::kFloat64InsertLowWord32:
       case IrOpcode::kFloat64InsertHighWord32:
-        return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
+        return VisitBinop(node, UseInfo::TruncatingFloat64(),
+                          UseInfo::TruncatingWord32(),
                           MachineRepresentation::kFloat64);
       case IrOpcode::kLoadStackPointer:
       case IrOpcode::kLoadFramePointer:
@@ -1482,6 +1570,166 @@
   selector.Run(this);
 }
 
+void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
+    Node* node, RepresentationSelector* selector) {
+  DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+  Node* value = node->InputAt(0);
+  Node* context = node->InputAt(1);
+  Node* frame_state = node->InputAt(2);
+  Node* effect = node->InputAt(3);
+  Node* control = node->InputAt(4);
+  Node* throwing;
+
+  Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    vtrue0 = graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
+    vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    throwing = vfalse0 = efalse0 =
+        graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
+                         frame_state, efalse0, if_false0);
+    if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+    Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    Node* vtrue1;
+    {
+      vtrue1 =
+          graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
+      vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
+    }
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+    Node* vfalse1;
+    {
+      vfalse1 = efalse1 = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+          efalse1, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue0, vfalse0, control);
+
+  // Replace effect and control uses appropriately.
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge)) {
+      if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+        edge.from()->ReplaceUses(control);
+        edge.from()->Kill();
+      } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+        edge.UpdateTo(throwing);
+      } else {
+        UNREACHABLE();
+      }
+    } else if (NodeProperties::IsEffectEdge(edge)) {
+      edge.UpdateTo(effect);
+    }
+  }
+
+  selector->DeferReplacement(node, value);
+}
+
+void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
+    Node* node, RepresentationSelector* selector) {
+  DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+  Node* value = node->InputAt(0);
+  Node* context = node->InputAt(1);
+  Node* frame_state = node->InputAt(2);
+  Node* effect = node->InputAt(3);
+  Node* control = node->InputAt(4);
+  Node* throwing;
+
+  Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0 =
+      graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    throwing = vfalse0 = efalse0 =
+        graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
+                         frame_state, efalse0, if_false0);
+    if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+    Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    Node* vtrue1 =
+        graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+    Node* vfalse1;
+    {
+      vfalse1 = efalse1 = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+          efalse1, if_false1);
+      vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                               vtrue1, vfalse1, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+                           vtrue0, vfalse0, control);
+
+  // Replace effect and control uses appropriately.
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge)) {
+      if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+        edge.from()->ReplaceUses(control);
+        edge.from()->Kill();
+      } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+        edge.UpdateTo(throwing);
+      } else {
+        UNREACHABLE();
+      }
+    } else if (NodeProperties::IsEffectEdge(edge)) {
+      edge.UpdateTo(effect);
+    }
+  }
+
+  selector->DeferReplacement(node, value);
+}
 
 void SimplifiedLowering::DoLoadBuffer(Node* node,
                                       MachineRepresentation output_rep,
@@ -1507,9 +1755,11 @@
     Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
     Node* etrue = graph()->NewNode(machine()->Load(access_type), buffer, index,
                                    effect, if_true);
+    Type* element_type =
+        Type::Intersect(NodeProperties::GetType(node), Type::Number(), zone());
     Node* vtrue = changer->GetRepresentationFor(
-        etrue, access_type.representation(), NodeProperties::GetType(node),
-        output_rep, Truncation::None());
+        etrue, access_type.representation(), element_type, output_rep,
+        Truncation::None());
 
     Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
     Node* efalse = effect;
@@ -2200,6 +2450,26 @@
   NodeProperties::ChangeOp(node, op);
 }
 
+Node* SimplifiedLowering::ToNumberCode() {
+  if (!to_number_code_.is_set()) {
+    Callable callable = CodeFactory::ToNumber(isolate());
+    to_number_code_.set(jsgraph()->HeapConstant(callable.code()));
+  }
+  return to_number_code_.get();
+}
+
+Operator const* SimplifiedLowering::ToNumberOperator() {
+  if (!to_number_operator_.is_set()) {
+    Callable callable = CodeFactory::ToNumber(isolate());
+    CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+        Operator::kNoProperties);
+    to_number_operator_.set(common()->Call(desc));
+  }
+  return to_number_operator_.get();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
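
The two DoJSToNumber lowerings added above share one shape: an ObjectIsSmi diamond whose fast path converts a tagged signed value directly, and a slow path that calls the ToNumber stub (which may throw, hence the IfSuccess/IfException rewiring) and then dispatches again on the stub's result. A minimal stand-alone sketch of that control flow — the Smi encoding and all names here are assumptions for illustration, not V8 code:

#include <cstdint>
#include <functional>

using Tagged = intptr_t;  // assumption: value << 1 Smi encoding, low bit 0

bool IsSmi(Tagged v) { return (v & 1) == 0; }
int32_t SmiToInt32(Tagged v) { return static_cast<int32_t>(v >> 1); }

// Mirrors DoJSToNumberTruncatesToFloat64: the outer test is check0/branch0,
// the to_number callback stands in for the ToNumber stub call, and the inner
// test is check1/branch1 on the stub's result.
double JSToNumberTruncatesToFloat64(
    Tagged value, const std::function<Tagged(Tagged)>& to_number,
    const std::function<double(Tagged)>& load_heap_number_value) {
  if (IsSmi(value)) {                      // fast path, BranchHint::kTrue
    return static_cast<double>(SmiToInt32(value));
  }
  Tagged number = to_number(value);        // slow path: ToNumber stub call
  if (IsSmi(number)) {
    return static_cast<double>(SmiToInt32(number));
  }
  return load_heap_number_value(number);   // LoadField(HeapNumberValue)
}

The Word32 variant is identical except that the Smi paths stay in int32 and the HeapNumber load is followed by TruncateFloat64ToWord32.
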
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 8b711a9..baffe20 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -21,6 +21,7 @@
 
 // Forward declarations.
 class RepresentationChanger;
+class RepresentationSelector;
 class SourcePositionTable;
 
 class SimplifiedLowering final {
@@ -31,6 +32,10 @@
 
   void LowerAllNodes();
 
+  void DoJSToNumberTruncatesToFloat64(Node* node,
+                                      RepresentationSelector* selector);
+  void DoJSToNumberTruncatesToWord32(Node* node,
+                                     RepresentationSelector* selector);
   // TODO(turbofan): The representation can be removed once the result of the
   // representation analysis is stored in the node bounds.
   void DoLoadBuffer(Node* node, MachineRepresentation rep,
@@ -38,15 +43,12 @@
   void DoStoreBuffer(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
 
-  // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
-  // typing hack to support the gigantic "asm.js should be fast without proper
-  // verifier"-hack, ... Kill this! Soon! Really soon! I'm serious!
-  bool abort_compilation_ = false;
-
  private:
   JSGraph* const jsgraph_;
   Zone* const zone_;
   TypeCache const& type_cache_;
+  SetOncePointer<Node> to_number_code_;
+  SetOncePointer<Operator const> to_number_operator_;
 
   // TODO(danno): SimplifiedLowering shouldn't know anything about the source
   // positions table, but must for now since there currently is no other way to
@@ -64,6 +66,9 @@
   Node* Uint32Div(Node* const node);
   Node* Uint32Mod(Node* const node);
 
+  Node* ToNumberCode();
+  Operator const* ToNumberOperator();
+
   friend class RepresentationSelector;
 
   Isolate* isolate() { return jsgraph_->isolate(); }
@@ -72,6 +77,7 @@
   Graph* graph() { return jsgraph()->graph(); }
   CommonOperatorBuilder* common() { return jsgraph()->common(); }
   MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
+  SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
 };
 
 }  // namespace compiler
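
The new to_number_code_ and to_number_operator_ members follow a compute-once, reuse-forever pattern: the HeapConstant for the ToNumber stub and its call descriptor are built lazily on first use and then shared by every lowered JSToNumber node. A simplified model of such a set-once slot (this is a stand-in, not V8's actual SetOncePointer):

// Simplified model of a set-once cache slot; the real utility treats a
// second set() as a bug rather than silently ignoring it.
template <typename T>
class SetOncePointer {
 public:
  bool is_set() const { return ptr_ != nullptr; }
  void set(T* ptr) {
    if (!is_set()) ptr_ = ptr;  // assumption: first set wins
  }
  T* get() const { return ptr_; }

 private:
  T* ptr_ = nullptr;
};
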
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 012004a..6fbf16e 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -8,6 +8,7 @@
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/conversions-inl.h"
 #include "src/type-cache.h"
 
@@ -31,34 +32,39 @@
       if (m.IsBooleanNot()) return Replace(m.InputAt(0));
       break;
     }
-    case IrOpcode::kChangeBitToBool: {
+    case IrOpcode::kChangeBitToTagged: {
       Int32Matcher m(node->InputAt(0));
       if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
       if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
-      if (m.IsChangeBoolToBit()) return Replace(m.InputAt(0));
+      if (m.IsChangeTaggedToBit()) return Replace(m.InputAt(0));
       break;
     }
-    case IrOpcode::kChangeBoolToBit: {
+    case IrOpcode::kChangeTaggedToBit: {
       HeapObjectMatcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(m.Value()->BooleanValue());
-      if (m.IsChangeBitToBool()) return Replace(m.InputAt(0));
+      if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
       break;
     }
     case IrOpcode::kChangeFloat64ToTagged: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceNumber(m.Value());
+      if (m.IsChangeTaggedToFloat64()) return Replace(m.node()->InputAt(0));
       break;
     }
+    case IrOpcode::kChangeInt31ToTaggedSigned:
     case IrOpcode::kChangeInt32ToTagged: {
       Int32Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceNumber(m.Value());
+      if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
+        return Replace(m.InputAt(0));
+      }
       break;
     }
     case IrOpcode::kChangeTaggedToFloat64: {
       NumberMatcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceFloat64(m.Value());
       if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
-      if (m.IsChangeInt32ToTagged()) {
+      if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
         return Change(node, machine()->ChangeInt32ToFloat64(), m.InputAt(0));
       }
       if (m.IsChangeUint32ToTagged()) {
@@ -72,7 +78,9 @@
       if (m.IsChangeFloat64ToTagged()) {
         return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
       }
-      if (m.IsChangeInt32ToTagged()) return Replace(m.InputAt(0));
+      if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
+        return Replace(m.InputAt(0));
+      }
       break;
     }
     case IrOpcode::kChangeTaggedToUint32: {
@@ -89,6 +97,18 @@
       if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
       break;
     }
+    case IrOpcode::kTruncateTaggedToWord32: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+      if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged() ||
+          m.IsChangeUint32ToTagged()) {
+        return Replace(m.InputAt(0));
+      }
+      if (m.IsChangeFloat64ToTagged()) {
+        return Change(node, machine()->TruncateFloat64ToWord32(), m.InputAt(0));
+      }
+      break;
+    }
     case IrOpcode::kNumberCeil:
     case IrOpcode::kNumberFloor:
     case IrOpcode::kNumberRound:
@@ -102,6 +122,8 @@
     }
     case IrOpcode::kReferenceEqual:
       return ReduceReferenceEqual(node);
+    case IrOpcode::kTypeGuard:
+      return ReduceTypeGuard(node);
     default:
       break;
   }
@@ -124,6 +146,14 @@
   return NoChange();
 }
 
+Reduction SimplifiedOperatorReducer::ReduceTypeGuard(Node* node) {
+  DCHECK_EQ(IrOpcode::kTypeGuard, node->opcode());
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetTypeOrAny(input);
+  Type* const guard_type = TypeOf(node->op());
+  if (input_type->Is(guard_type)) return Replace(input);
+  return NoChange();
+}
 
 Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
                                             Node* a) {
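
Most of the reducer cases above are cancellation rules for the renamed representation changes: a conversion applied to its inverse (for example ChangeTaggedToFloat64 of ChangeFloat64ToTagged) folds back to the original input. A toy model of one such rule, with the node and opcode types invented for illustration:

enum class Op {
  kChangeFloat64ToTagged,
  kChangeTaggedToFloat64,
  kOther,
};

struct Node {
  Op op;
  Node* input;  // single value input, enough for this sketch
};

// Returns the replacement node, or nullptr when no reduction applies,
// mirroring Replace(m.node()->InputAt(0)) in the case above.
Node* ReduceChangeTaggedToFloat64(Node* node) {
  if (node->op != Op::kChangeTaggedToFloat64) return nullptr;
  if (node->input->op == Op::kChangeFloat64ToTagged) return node->input->input;
  return nullptr;
}
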
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 13301c2..70750a8 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -30,6 +30,7 @@
 
  private:
   Reduction ReduceReferenceEqual(Node* node);
+  Reduction ReduceTypeGuard(Node* node);
 
   Reduction Change(Node* node, const Operator* op, Node* a);
   Reduction ReplaceFloat64(double value);
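
ReduceTypeGuard is a simple subsumption test: when the input's statically known type is already contained in the guarded type, the guard adds no information and the input can flow through directly. Sketched with types modeled as bitsets — an assumption made for illustration; V8's Type lattice is richer than a flat bitset:

#include <cstdint>

using TypeBits = uint32_t;  // assumed bitset lattice: subset == implication

// input <= guard holds when input has no bits outside guard.
bool Is(TypeBits input, TypeBits guard) { return (input & ~guard) == 0; }

// TypeGuard(input) can be replaced by input when the guard is redundant.
bool TypeGuardIsRedundant(TypeBits input_type, TypeBits guard_type) {
  return Is(input_type, guard_type);
}
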
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index daa9501..0350403 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -13,6 +13,10 @@
 namespace internal {
 namespace compiler {
 
+size_t hash_value(BaseTaggedness base_taggedness) {
+  return static_cast<uint8_t>(base_taggedness);
+}
+
 std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
   switch (base_taggedness) {
     case kUntaggedBase:
@@ -84,6 +88,9 @@
 
 
 bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
+  // We deliberately ignore the write barrier kind here: this method only
+  // matters for eliminating redundant loads, and loads don't care about the
+  // write barrier mode.
   return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
          lhs.machine_type == rhs.machine_type;
 }
@@ -95,6 +102,9 @@
 
 
 size_t hash_value(FieldAccess const& access) {
+  // We deliberately ignore the write barrier kind here: this method only
+  // matters for eliminating redundant loads, and loads don't care about the
+  // write barrier mode.
   return base::hash_combine(access.base_is_tagged, access.offset,
                             access.machine_type);
 }
@@ -110,12 +120,15 @@
   }
 #endif
   access.type->PrintTo(os);
-  os << ", " << access.machine_type << "]";
+  os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
   return os;
 }
 
 
 bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
+  // We deliberately ignore the write barrier kind here: this method only
+  // matters for eliminating redundant loads, and loads don't care about the
+  // write barrier mode.
   return lhs.base_is_tagged == rhs.base_is_tagged &&
          lhs.header_size == rhs.header_size &&
          lhs.machine_type == rhs.machine_type;
@@ -128,6 +141,9 @@
 
 
 size_t hash_value(ElementAccess const& access) {
+  // We deliberately ignore the write barrier kind here: this method only
+  // matters for eliminating redundant loads, and loads don't care about the
+  // write barrier mode.
   return base::hash_combine(access.base_is_tagged, access.header_size,
                             access.machine_type);
 }
@@ -136,7 +152,7 @@
 std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
   os << access.base_is_tagged << ", " << access.header_size << ", ";
   access.type->PrintTo(os);
-  os << ", " << access.machine_type;
+  os << ", " << access.machine_type << ", " << access.write_barrier_kind;
   return os;
 }
 
@@ -156,51 +172,58 @@
   return OpParameter<ElementAccess>(op);
 }
 
-#define PURE_OP_LIST(V)                                  \
-  V(BooleanNot, Operator::kNoProperties, 1)              \
-  V(BooleanToNumber, Operator::kNoProperties, 1)         \
-  V(NumberEqual, Operator::kCommutative, 2)              \
-  V(NumberLessThan, Operator::kNoProperties, 2)          \
-  V(NumberLessThanOrEqual, Operator::kNoProperties, 2)   \
-  V(NumberAdd, Operator::kCommutative, 2)                \
-  V(NumberSubtract, Operator::kNoProperties, 2)          \
-  V(NumberMultiply, Operator::kCommutative, 2)           \
-  V(NumberDivide, Operator::kNoProperties, 2)            \
-  V(NumberModulus, Operator::kNoProperties, 2)           \
-  V(NumberBitwiseOr, Operator::kCommutative, 2)          \
-  V(NumberBitwiseXor, Operator::kCommutative, 2)         \
-  V(NumberBitwiseAnd, Operator::kCommutative, 2)         \
-  V(NumberShiftLeft, Operator::kNoProperties, 2)         \
-  V(NumberShiftRight, Operator::kNoProperties, 2)        \
-  V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
-  V(NumberImul, Operator::kNoProperties, 2)              \
-  V(NumberClz32, Operator::kNoProperties, 1)             \
-  V(NumberCeil, Operator::kNoProperties, 1)              \
-  V(NumberFloor, Operator::kNoProperties, 1)             \
-  V(NumberRound, Operator::kNoProperties, 1)             \
-  V(NumberTrunc, Operator::kNoProperties, 1)             \
-  V(NumberToInt32, Operator::kNoProperties, 1)           \
-  V(NumberToUint32, Operator::kNoProperties, 1)          \
-  V(NumberIsHoleNaN, Operator::kNoProperties, 1)         \
-  V(PlainPrimitiveToNumber, Operator::kNoProperties, 1)  \
-  V(StringToNumber, Operator::kNoProperties, 1)          \
-  V(ChangeTaggedToInt32, Operator::kNoProperties, 1)     \
-  V(ChangeTaggedToUint32, Operator::kNoProperties, 1)    \
-  V(ChangeTaggedToFloat64, Operator::kNoProperties, 1)   \
-  V(ChangeInt32ToTagged, Operator::kNoProperties, 1)     \
-  V(ChangeUint32ToTagged, Operator::kNoProperties, 1)    \
-  V(ChangeFloat64ToTagged, Operator::kNoProperties, 1)   \
-  V(ChangeBoolToBit, Operator::kNoProperties, 1)         \
-  V(ChangeBitToBool, Operator::kNoProperties, 1)         \
-  V(ObjectIsNumber, Operator::kNoProperties, 1)          \
-  V(ObjectIsReceiver, Operator::kNoProperties, 1)        \
-  V(ObjectIsSmi, Operator::kNoProperties, 1)             \
-  V(ObjectIsUndetectable, Operator::kNoProperties, 1)
+Type* TypeOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
+  return OpParameter<Type*>(op);
+}
 
-#define NO_THROW_OP_LIST(V)                 \
-  V(StringEqual, Operator::kCommutative, 2) \
-  V(StringLessThan, Operator::kNoThrow, 2)  \
-  V(StringLessThanOrEqual, Operator::kNoThrow, 2)
+#define PURE_OP_LIST(V)                                    \
+  V(BooleanNot, Operator::kNoProperties, 1)                \
+  V(BooleanToNumber, Operator::kNoProperties, 1)           \
+  V(NumberEqual, Operator::kCommutative, 2)                \
+  V(NumberLessThan, Operator::kNoProperties, 2)            \
+  V(NumberLessThanOrEqual, Operator::kNoProperties, 2)     \
+  V(NumberAdd, Operator::kCommutative, 2)                  \
+  V(NumberSubtract, Operator::kNoProperties, 2)            \
+  V(NumberMultiply, Operator::kCommutative, 2)             \
+  V(NumberDivide, Operator::kNoProperties, 2)              \
+  V(NumberModulus, Operator::kNoProperties, 2)             \
+  V(NumberBitwiseOr, Operator::kCommutative, 2)            \
+  V(NumberBitwiseXor, Operator::kCommutative, 2)           \
+  V(NumberBitwiseAnd, Operator::kCommutative, 2)           \
+  V(NumberShiftLeft, Operator::kNoProperties, 2)           \
+  V(NumberShiftRight, Operator::kNoProperties, 2)          \
+  V(NumberShiftRightLogical, Operator::kNoProperties, 2)   \
+  V(NumberImul, Operator::kCommutative, 2)                 \
+  V(NumberClz32, Operator::kNoProperties, 1)               \
+  V(NumberCeil, Operator::kNoProperties, 1)                \
+  V(NumberFloor, Operator::kNoProperties, 1)               \
+  V(NumberRound, Operator::kNoProperties, 1)               \
+  V(NumberTrunc, Operator::kNoProperties, 1)               \
+  V(NumberToInt32, Operator::kNoProperties, 1)             \
+  V(NumberToUint32, Operator::kNoProperties, 1)            \
+  V(NumberIsHoleNaN, Operator::kNoProperties, 1)           \
+  V(StringToNumber, Operator::kNoProperties, 1)            \
+  V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1) \
+  V(ChangeTaggedToInt32, Operator::kNoProperties, 1)       \
+  V(ChangeTaggedToUint32, Operator::kNoProperties, 1)      \
+  V(ChangeTaggedToFloat64, Operator::kNoProperties, 1)     \
+  V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1) \
+  V(ChangeInt32ToTagged, Operator::kNoProperties, 1)       \
+  V(ChangeUint32ToTagged, Operator::kNoProperties, 1)      \
+  V(ChangeFloat64ToTagged, Operator::kNoProperties, 1)     \
+  V(ChangeTaggedToBit, Operator::kNoProperties, 1)         \
+  V(ChangeBitToTagged, Operator::kNoProperties, 1)         \
+  V(TruncateTaggedToWord32, Operator::kNoProperties, 1)    \
+  V(ObjectIsCallable, Operator::kNoProperties, 1)          \
+  V(ObjectIsNumber, Operator::kNoProperties, 1)            \
+  V(ObjectIsReceiver, Operator::kNoProperties, 1)          \
+  V(ObjectIsSmi, Operator::kNoProperties, 1)               \
+  V(ObjectIsString, Operator::kNoProperties, 1)            \
+  V(ObjectIsUndetectable, Operator::kNoProperties, 1)      \
+  V(StringEqual, Operator::kCommutative, 2)                \
+  V(StringLessThan, Operator::kNoProperties, 2)            \
+  V(StringLessThanOrEqual, Operator::kNoProperties, 2)
 
 struct SimplifiedOperatorGlobalCache final {
 #define PURE(Name, properties, input_count)                                \
@@ -213,15 +236,14 @@
   PURE_OP_LIST(PURE)
 #undef PURE
 
-#define NO_THROW(Name, properties, input_count)                               \
-  struct Name##Operator final : public Operator {                             \
-    Name##Operator()                                                          \
-        : Operator(IrOpcode::k##Name, Operator::kNoThrow | properties, #Name, \
-                   input_count, 1, 1, 1, 1, 0) {}                             \
-  };                                                                          \
-  Name##Operator k##Name;
-  NO_THROW_OP_LIST(NO_THROW)
-#undef NO_THROW
+  template <PretenureFlag kPretenure>
+  struct AllocateOperator final : public Operator1<PretenureFlag> {
+    AllocateOperator()
+        : Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
+                                   "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
+  };
+  AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
+  AllocateOperator<TENURED> kAllocateTenuredOperator;
 
 #define BUFFER_ACCESS(Type, type, TYPE, ctype, size)                          \
   struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> {  \
@@ -256,7 +278,6 @@
 #define GET_FROM_CACHE(Name, properties, input_count) \
   const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
 PURE_OP_LIST(GET_FROM_CACHE)
-NO_THROW_OP_LIST(GET_FROM_CACHE)
 #undef GET_FROM_CACHE
 
 
@@ -266,11 +287,32 @@
                                "ReferenceEqual", 2, 0, 0, 1, 0, 0);
 }
 
+const Operator* SimplifiedOperatorBuilder::TypeGuard(Type* type) {
+  class TypeGuardOperator final : public Operator1<Type*> {
+   public:
+    explicit TypeGuardOperator(Type* type)
+        : Operator1<Type*>(                           // --
+              IrOpcode::kTypeGuard, Operator::kPure,  // opcode, properties
+              "TypeGuard",                            // name
+              1, 0, 1, 1, 0, 0,                       // counts
+              type) {}                                // parameter
+
+    void PrintParameter(std::ostream& os) const final {
+      parameter()->PrintTo(os);
+    }
+  };
+  return new (zone()) TypeGuardOperator(type);
+}
 
 const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
-  return new (zone())
-      Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
-                               "Allocate", 1, 1, 1, 1, 1, 0, pretenure);
+  switch (pretenure) {
+    case NOT_TENURED:
+      return &cache_.kAllocateNotTenuredOperator;
+    case TENURED:
+      return &cache_.kAllocateTenuredOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
 }
 
 
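Allocate(pretenure) previously built a fresh Operator1 in the compiler zone on every call; the global cache above replaces that with two statically constructed operators, one per PretenureFlag. The shape of the change, reduced to a stand-alone sketch with invented types:

enum PretenureFlag { NOT_TENURED, TENURED };

struct AllocateOp {
  PretenureFlag pretenure;
};

// One immutable operator instance per flag value; no per-call allocation.
const AllocateOp* Allocate(PretenureFlag pretenure) {
  static const AllocateOp kNotTenured{NOT_TENURED};
  static const AllocateOp kTenured{TENURED};
  return pretenure == NOT_TENURED ? &kNotTenured : &kTenured;
}
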
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index a39d864..20d8a39 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -25,8 +25,9 @@
 class Operator;
 struct SimplifiedOperatorGlobalCache;
 
+enum BaseTaggedness : uint8_t { kUntaggedBase, kTaggedBase };
 
-enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+size_t hash_value(BaseTaggedness);
 
 std::ostream& operator<<(std::ostream&, BaseTaggedness);
 
@@ -63,6 +64,7 @@
   MaybeHandle<Name> name;         // debugging only.
   Type* type;                     // type of the field.
   MachineType machine_type;       // machine type of the field.
+  WriteBarrierKind write_barrier_kind;  // write barrier hint.
 
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
@@ -86,6 +88,7 @@
   int header_size;                // size of the header, without tag.
   Type* type;                     // type of the element.
   MachineType machine_type;       // machine type of the element.
+  WriteBarrierKind write_barrier_kind;  // write barrier hint.
 
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
@@ -99,6 +102,7 @@
 
 ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
+Type* TypeOf(const Operator* op) WARN_UNUSED_RESULT;
 
 // Interface for building simplified operators, which represent the
 // medium-level operations of V8, including adding numbers, allocating objects,
@@ -153,8 +157,6 @@
   const Operator* NumberToUint32();
   const Operator* NumberIsHoleNaN();
 
-  const Operator* PlainPrimitiveToNumber();
-
   const Operator* ReferenceEqual(Type* type);
 
   const Operator* StringEqual();
@@ -162,20 +164,27 @@
   const Operator* StringLessThanOrEqual();
   const Operator* StringToNumber();
 
+  const Operator* ChangeTaggedSignedToInt32();
   const Operator* ChangeTaggedToInt32();
   const Operator* ChangeTaggedToUint32();
   const Operator* ChangeTaggedToFloat64();
+  const Operator* ChangeInt31ToTaggedSigned();
   const Operator* ChangeInt32ToTagged();
   const Operator* ChangeUint32ToTagged();
   const Operator* ChangeFloat64ToTagged();
-  const Operator* ChangeBoolToBit();
-  const Operator* ChangeBitToBool();
+  const Operator* ChangeTaggedToBit();
+  const Operator* ChangeBitToTagged();
+  const Operator* TruncateTaggedToWord32();
 
+  const Operator* ObjectIsCallable();
   const Operator* ObjectIsNumber();
   const Operator* ObjectIsReceiver();
   const Operator* ObjectIsSmi();
+  const Operator* ObjectIsString();
   const Operator* ObjectIsUndetectable();
 
+  const Operator* TypeGuard(Type* type);
+
   const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
 
   const Operator* LoadField(FieldAccess const&);
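
The new write_barrier_kind fields are hints for store lowering only; as the comments in simplified-operator.cc stress, they are kept out of operator== and hash_value so that two accesses differing only in the hint still compare equal and redundant loads can be eliminated. A minimal illustration of keeping a hint out of a value's identity (all types here are invented):

#include <cstddef>
#include <functional>

enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };

struct FieldAccessSketch {
  int offset;
  WriteBarrierKind write_barrier_kind;  // hint only, not part of identity
};

bool operator==(const FieldAccessSketch& a, const FieldAccessSketch& b) {
  return a.offset == b.offset;  // hint deliberately ignored
}

size_t hash_value(const FieldAccessSketch& a) {
  return std::hash<int>()(a.offset);  // must agree with operator==
}

Equal hashes for equal values is the invariant that matters: including the hint in the hash but not the comparison (or vice versa) would break value-numbering maps.
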
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
index 48361ec..80f1800 100644
--- a/src/compiler/source-position.cc
+++ b/src/compiler/source-position.cc
@@ -16,7 +16,8 @@
       : source_positions_(source_positions) {}
 
   void Decorate(Node* node) final {
-    source_positions_->table_.Set(node, source_positions_->current_position_);
+    source_positions_->SetSourcePosition(node,
+                                         source_positions_->current_position_);
   }
 
  private:
@@ -49,6 +50,10 @@
   return table_.Get(node);
 }
 
+void SourcePositionTable::SetSourcePosition(Node* node,
+                                            SourcePosition position) {
+  table_.Set(node, position);
+}
 
 void SourcePositionTable::Print(std::ostream& os) const {
   os << "{";
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
index 81db1d2..912f188 100644
--- a/src/compiler/source-position.h
+++ b/src/compiler/source-position.h
@@ -38,8 +38,7 @@
   return !(lhs == rhs);
 }
 
-
-class SourcePositionTable final {
+class SourcePositionTable final : public ZoneObject {
  public:
   class Scope final {
    public:
@@ -66,14 +65,12 @@
   };
 
   explicit SourcePositionTable(Graph* graph);
-  ~SourcePositionTable() {
-    if (decorator_) RemoveDecorator();
-  }
 
   void AddDecorator();
   void RemoveDecorator();
 
   SourcePosition GetSourcePosition(Node* node) const;
+  void SetSourcePosition(Node* node, SourcePosition position);
 
   void Print(std::ostream& os) const;
 
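Two SourcePositionTable changes work together here: deriving from ZoneObject lets the table live in a zone without a destructor ever running (hence the removed ~SourcePositionTable), and writes now go through the new SetSourcePosition accessor instead of the decorator poking the private table. A rough model of the resulting interface, with the map type assumed for illustration:

#include <unordered_map>

struct Node;                 // opaque graph node
using SourcePosition = int;  // assumption: a position is just an offset here

class SourcePositionTableSketch {
 public:
  SourcePosition GetSourcePosition(Node* node) const {
    auto it = table_.find(node);
    return it == table_.end() ? -1 : it->second;  // -1: position unknown
  }
  void SetSourcePosition(Node* node, SourcePosition position) {
    table_[node] = position;
  }

 private:
  std::unordered_map<Node*, SourcePosition> table_;
};
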
diff --git a/src/compiler/tail-call-optimization.cc b/src/compiler/tail-call-optimization.cc
index 6635fb9..7e1623a 100644
--- a/src/compiler/tail-call-optimization.cc
+++ b/src/compiler/tail-call-optimization.cc
@@ -20,7 +20,7 @@
   // other effect between the Call and the Return nodes.
   Node* const call = NodeProperties::GetValueInput(node, 0);
   if (call->opcode() == IrOpcode::kCall &&
-      OpParameter<CallDescriptor const*>(call)->SupportsTailCalls() &&
+      CallDescriptorOf(call->op())->SupportsTailCalls() &&
       NodeProperties::GetEffectInput(node) == call &&
       !NodeProperties::IsExceptionalCall(call)) {
     Node* const control = NodeProperties::GetControlInput(node);
@@ -71,7 +71,7 @@
                           NodeProperties::GetValueInput(call, index));
       }
       NodeProperties::ChangeOp(
-          node, common()->TailCall(OpParameter<CallDescriptor const*>(call)));
+          node, common()->TailCall(CallDescriptorOf(call->op())));
       return Changed(node);
     }
   }
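
Replacing OpParameter<CallDescriptor const*>(call) with CallDescriptorOf(call->op()) swaps a raw parameter cast for a typed accessor that can assert it is looking at a Call operator. The general pattern, sketched with invented types:

#include <cassert>

struct CallDescriptor {
  bool SupportsTailCalls() const { return supports_tail_calls; }
  bool supports_tail_calls = false;
};

struct Operator {
  int opcode;
  const void* parameter;  // meaning depends on opcode; easy to misread raw
};

constexpr int kCall = 1;

// Checked accessor: misuse trips the assert instead of silently
// reinterpreting an unrelated parameter as a CallDescriptor.
const CallDescriptor* CallDescriptorOf(const Operator* op) {
  assert(op->opcode == kCall);
  return static_cast<const CallDescriptor*>(op->parameter);
}
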
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 81c3d3d..d98d2fe 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -97,6 +97,7 @@
       COMMON_OP_LIST(DECLARE_CASE)
       SIMPLIFIED_OP_LIST(DECLARE_CASE)
       MACHINE_OP_LIST(DECLARE_CASE)
+      MACHINE_SIMD_OP_LIST(DECLARE_CASE)
       JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
       JS_OBJECT_OP_LIST(DECLARE_CASE)
       JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -143,6 +144,7 @@
       COMMON_OP_LIST(DECLARE_CASE)
       SIMPLIFIED_OP_LIST(DECLARE_CASE)
       MACHINE_OP_LIST(DECLARE_CASE)
+      MACHINE_SIMD_OP_LIST(DECLARE_CASE)
       JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
       JS_OBJECT_OP_LIST(DECLARE_CASE)
       JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -247,9 +249,11 @@
   static Type* NumberToInt32(Type*, Typer*);
   static Type* NumberToUint32(Type*, Typer*);
 
+  static Type* ObjectIsCallable(Type*, Typer*);
   static Type* ObjectIsNumber(Type*, Typer*);
   static Type* ObjectIsReceiver(Type*, Typer*);
   static Type* ObjectIsSmi(Type*, Typer*);
+  static Type* ObjectIsString(Type*, Typer*);
   static Type* ObjectIsUndetectable(Type*, Typer*);
 
   static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
@@ -556,6 +560,11 @@
 
 // Type checks.
 
+Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
+  if (type->Is(Type::Function())) return t->singleton_true_;
+  if (type->Is(Type::Primitive())) return t->singleton_false_;
+  return Type::Boolean();
+}
 
 Type* Typer::Visitor::ObjectIsNumber(Type* type, Typer* t) {
   if (type->Is(Type::Number())) return t->singleton_true_;
@@ -577,6 +586,11 @@
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::ObjectIsString(Type* type, Typer* t) {
+  if (type->Is(Type::String())) return t->singleton_true_;
+  if (!type->Maybe(Type::String())) return t->singleton_false_;
+  return Type::Boolean();
+}
 
 Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
   if (type->Is(Type::Undetectable())) return t->singleton_true_;
@@ -624,6 +638,14 @@
   return Type::Internal();  // TODO(rossberg): Add int64 bitset type?
 }
 
+// TODO(gdeepti): Fix this to do something meaningful.
+Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
+  return Type::Internal();
+}
+
+Type* Typer::Visitor::TypeRelocatableInt64Constant(Node* node) {
+  return Type::Internal();
+}
 
 Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
   return Type::Intersect(Type::Of(OpParameter<float>(node), zone()),
@@ -677,19 +699,16 @@
   return nullptr;
 }
 
-
-Type* Typer::Visitor::TypeEffectSet(Node* node) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-
-Type* Typer::Visitor::TypeGuard(Node* node) {
+Type* Typer::Visitor::TypeTypeGuard(Node* node) {
   Type* input_type = Operand(node, 0);
-  Type* guard_type = OpParameter<Type*>(node);
+  Type* guard_type = TypeOf(node->op());
   return Type::Intersect(input_type, guard_type, zone());
 }
 
+Type* Typer::Visitor::TypeCheckPoint(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
 
 Type* Typer::Visitor::TypeBeginRegion(Node* node) {
   UNREACHABLE();
@@ -1331,34 +1350,6 @@
 
 
 Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
-  Factory* const f = isolate()->factory();
-  Handle<Name> name = NamedAccessOf(node->op()).name();
-  if (name.is_identical_to(f->prototype_string())) {
-    Type* receiver = Operand(node, 0);
-    if (receiver->Is(Type::None())) return Type::None();
-    if (receiver->IsConstant() &&
-        receiver->AsConstant()->Value()->IsJSFunction()) {
-      Handle<JSFunction> function =
-          Handle<JSFunction>::cast(receiver->AsConstant()->Value());
-      if (function->has_prototype()) {
-        // We need to add a code dependency on the initial map of the {function}
-        // in order to be notified about changes to "prototype" of {function},
-        // so we can only infer a constant type if deoptimization is enabled.
-        if (flags() & kDeoptimizationEnabled) {
-          JSFunction::EnsureHasInitialMap(function);
-          Handle<Map> initial_map(function->initial_map(), isolate());
-          dependencies()->AssumeInitialMapCantChange(initial_map);
-          return Type::Constant(handle(initial_map->prototype(), isolate()),
-                                zone());
-        }
-      }
-    } else if (receiver->IsClass() &&
-               receiver->AsClass()->Map()->IsJSFunctionMap()) {
-      Handle<Map> map = receiver->AsClass()->Map();
-      return map->has_non_instance_prototype() ? Type::Primitive()
-                                               : Type::Receiver();
-    }
-  }
   return Type::Any();
 }
 
@@ -1537,9 +1528,6 @@
 // JS other operators.
 
 
-Type* Typer::Visitor::TypeJSYield(Node* node) { return Type::Any(); }
-
-
 Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
   return Type::Receiver();
 }
@@ -1793,12 +1781,6 @@
   return Type::Boolean();
 }
 
-
-Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
-  return TypeUnaryOp(node, ToNumber);
-}
-
-
 // static
 Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
   if (lhs->IsConstant() && rhs->Is(lhs)) {
@@ -1833,6 +1815,11 @@
 
 }  // namespace
 
+Type* Typer::Visitor::TypeChangeTaggedSignedToInt32(Node* node) {
+  Type* arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+  return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+}
 
 Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
   Type* arg = Operand(node, 0);
@@ -1854,6 +1841,13 @@
   return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
 }
 
+Type* Typer::Visitor::TypeChangeInt31ToTaggedSigned(Node* node) {
+  Type* arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg->Is(Type::Signed31()));
+  Type* rep =
+      arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
+  return ChangeRepresentation(arg, rep, zone());
+}
 
 Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
   Type* arg = Operand(node, 0);
@@ -1877,20 +1871,23 @@
   return ChangeRepresentation(arg, Type::Tagged(), zone());
 }
 
-
-Type* Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+Type* Typer::Visitor::TypeChangeTaggedToBit(Node* node) {
   Type* arg = Operand(node, 0);
   // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
   return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
 }
 
-
-Type* Typer::Visitor::TypeChangeBitToBool(Node* node) {
+Type* Typer::Visitor::TypeChangeBitToTagged(Node* node) {
   Type* arg = Operand(node, 0);
   // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
   return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
 }
 
+Type* Typer::Visitor::TypeTruncateTaggedToWord32(Node* node) {
+  Type* arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg->Is(Type::Number()));
+  return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+}
 
 Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
 
@@ -1949,9 +1946,9 @@
   // TODO(bmeurer): This typing is not yet correct. Since we can still access
   // out of bounds, the type in the general case has to include Undefined.
   switch (BufferAccessOf(node->op()).external_array_type()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
-  case kExternal##Type##Array:                          \
-    return typer_->cache_.k##Type;
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
+  case kExternal##ElemType##Array:                          \
+    return Type::Union(typer_->cache_.k##ElemType, Type::Undefined(), zone());
     TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
   }
@@ -1982,6 +1979,9 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
+  return TypeUnaryOp(node, ObjectIsCallable);
+}
 
 Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
   return TypeUnaryOp(node, ObjectIsNumber);
@@ -1997,6 +1997,9 @@
   return TypeUnaryOp(node, ObjectIsSmi);
 }
 
+Type* Typer::Visitor::TypeObjectIsString(Node* node) {
+  return TypeUnaryOp(node, ObjectIsString);
+}
 
 Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
   return TypeUnaryOp(node, ObjectIsUndetectable);
@@ -2194,6 +2197,9 @@
 
 Type* Typer::Visitor::TypeUint64Mod(Node* node) { return Type::Internal(); }
 
+Type* Typer::Visitor::TypeBitcastWordToTagged(Node* node) {
+  return Type::TaggedPointer();
+}
 
 Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
   return Type::Intersect(Type::Number(), Type::UntaggedFloat64(), zone());
@@ -2270,9 +2276,9 @@
   return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
 }
 
-
-Type* Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
-  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+Type* Typer::Visitor::TypeTruncateFloat64ToWord32(Node* node) {
+  return Type::Intersect(Type::Integral32(), Type::UntaggedIntegral32(),
+                         zone());
 }
 
 
@@ -2280,6 +2286,9 @@
   return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
 }
 
+Type* Typer::Visitor::TypeRoundFloat64ToInt32(Node* node) {
+  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
 
 Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
   return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
@@ -2336,6 +2345,9 @@
 
 Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeFloat32SubPreserveNan(Node* node) {
+  return Type::Number();
+}
 
 Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
 
@@ -2376,6 +2388,9 @@
 
 Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeFloat64SubPreserveNan(Node* node) {
+  return Type::Number();
+}
 
 Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
 
@@ -2503,12 +2518,18 @@
 
 Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
 
-
 Type* Typer::Visitor::TypeCheckedStore(Node* node) {
   UNREACHABLE();
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeAtomicLoad(Node* node) { return Type::Any(); }
+
+Type* Typer::Visitor::TypeAtomicStore(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
 Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
 
 Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
@@ -2521,8 +2542,24 @@
 
 Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
 
-// Heap constants.
+// SIMD type methods.
 
+#define SIMD_RETURN_SIMD(Name) \
+  Type* Typer::Visitor::Type##Name(Node* node) { return Type::Simd(); }
+MACHINE_SIMD_RETURN_SIMD_OP_LIST(SIMD_RETURN_SIMD)
+#undef SIMD_RETURN_SIMD
+
+#define SIMD_RETURN_NUM(Name) \
+  Type* Typer::Visitor::Type##Name(Node* node) { return Type::Number(); }
+MACHINE_SIMD_RETURN_NUM_OP_LIST(SIMD_RETURN_NUM)
+#undef SIMD_RETURN_NUM
+
+#define SIMD_RETURN_BOOL(Name) \
+  Type* Typer::Visitor::Type##Name(Node* node) { return Type::Boolean(); }
+MACHINE_SIMD_RETURN_BOOL_OP_LIST(SIMD_RETURN_BOOL)
+#undef SIMD_RETURN_BOOL
+
+// Heap constants.
 
 Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
   if (value->IsJSTypedArray()) {
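
The new ObjectIsCallable and ObjectIsString typers follow the same three-valued scheme as the existing predicates: answer true when the input type is contained in the target set, false when it cannot intersect it, and Boolean otherwise. Reduced to a toy lattice for illustration (the real code returns singleton_true_, singleton_false_, or Type::Boolean()):

enum class Ternary { kTrue, kFalse, kUnknown };

struct TypeSketch {
  bool is_subset_of_string;   // type <= String
  bool may_intersect_string;  // type /\ String != bottom
};

// Mirrors Typer::Visitor::ObjectIsString above.
Ternary ObjectIsString(const TypeSketch& t) {
  if (t.is_subset_of_string) return Ternary::kTrue;
  if (!t.may_intersect_string) return Ternary::kFalse;
  return Ternary::kUnknown;
}

Note that ObjectIsCallable is more conservative on the false side: it tests Is(Type::Primitive()) rather than the negated Maybe test, so some statically non-callable inputs still type as Boolean.
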
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index a69ace9..0e34285 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -42,12 +42,14 @@
 
 class Verifier::Visitor {
  public:
-  Visitor(Zone* z, Typing typed) : zone(z), typing(typed) {}
+  Visitor(Zone* z, Typing typed, CheckInputs check_inputs)
+      : zone(z), typing(typed), check_inputs(check_inputs) {}
 
   void Check(Node* node);
 
   Zone* zone;
   Typing typing;
+  CheckInputs check_inputs;
 
  private:
   void CheckNotTyped(Node* node) {
@@ -114,8 +116,10 @@
   int control_count = node->op()->ControlInputCount();
 
   // Verify number of inputs matches up.
-  int input_count = value_count + context_count + frame_state_count +
-                    effect_count + control_count;
+  int input_count = value_count + context_count + frame_state_count;
+  if (check_inputs == kAll) {
+    input_count += effect_count + control_count;
+  }
   CHECK_EQ(input_count, node->InputCount());
 
   // Verify that frame state has been inserted for the nodes that need it.
@@ -150,20 +154,23 @@
     CHECK(IsUseDefChainLinkPresent(context, node));
   }
 
-  // Verify all effect inputs actually have an effect.
-  for (int i = 0; i < effect_count; ++i) {
-    Node* effect = NodeProperties::GetEffectInput(node);
-    CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
-    CHECK(IsDefUseChainLinkPresent(effect, node));
-    CHECK(IsUseDefChainLinkPresent(effect, node));
-  }
+  if (check_inputs == kAll) {
+    // Verify all effect inputs actually have an effect.
+    for (int i = 0; i < effect_count; ++i) {
+      Node* effect = NodeProperties::GetEffectInput(node);
+      CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
+      CHECK(IsDefUseChainLinkPresent(effect, node));
+      CHECK(IsUseDefChainLinkPresent(effect, node));
+    }
 
-  // Verify all control inputs are control nodes.
-  for (int i = 0; i < control_count; ++i) {
-    Node* control = NodeProperties::GetControlInput(node, i);
-    CheckOutput(control, node, control->op()->ControlOutputCount(), "control");
-    CHECK(IsDefUseChainLinkPresent(control, node));
-    CHECK(IsUseDefChainLinkPresent(control, node));
+    // Verify all control inputs are control nodes.
+    for (int i = 0; i < control_count; ++i) {
+      Node* control = NodeProperties::GetControlInput(node, i);
+      CheckOutput(control, node, control->op()->ControlOutputCount(),
+                  "control");
+      CHECK(IsDefUseChainLinkPresent(control, node));
+      CHECK(IsUseDefChainLinkPresent(control, node));
+    }
   }
 
   switch (node->opcode()) {
@@ -345,6 +352,10 @@
       // Type is a number.
       CheckUpperIs(node, Type::Number());
       break;
+    case IrOpcode::kRelocatableInt32Constant:
+    case IrOpcode::kRelocatableInt64Constant:
+      CHECK_EQ(0, input_count);
+      break;
     case IrOpcode::kHeapConstant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
@@ -406,15 +417,13 @@
       CHECK_EQ(input_count, 1 + effect_count);
       break;
     }
-    case IrOpcode::kEffectSet: {
-      CHECK_EQ(0, value_count);
-      CHECK_EQ(0, control_count);
-      CHECK_LT(1, effect_count);
-      break;
-    }
-    case IrOpcode::kGuard:
+    case IrOpcode::kTypeGuard:
       // TODO(bmeurer): what are the constraints on these?
       break;
+    case IrOpcode::kCheckPoint:
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kBeginRegion:
       // TODO(rossberg): what are the constraints on these?
       break;
@@ -596,7 +605,6 @@
       break;
     case IrOpcode::kJSCallFunction:
     case IrOpcode::kJSCallRuntime:
-    case IrOpcode::kJSYield:
       // Type can be anything.
       CheckUpperIs(node, Type::Any());
       break;
@@ -646,17 +654,27 @@
       CheckUpperIs(node, Type::Number());
       break;
     case IrOpcode::kNumberEqual:
+      // (NumberOrUndefined, NumberOrUndefined) -> Boolean
+      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+      CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+      CheckUpperIs(node, Type::Boolean());
+      break;
     case IrOpcode::kNumberLessThan:
     case IrOpcode::kNumberLessThanOrEqual:
       // (Number, Number) -> Boolean
-      CheckValueInputIs(node, 0, Type::Number());
-      CheckValueInputIs(node, 1, Type::Number());
+      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+      CheckValueInputIs(node, 1, Type::NumberOrUndefined());
       CheckUpperIs(node, Type::Boolean());
       break;
     case IrOpcode::kNumberAdd:
     case IrOpcode::kNumberSubtract:
     case IrOpcode::kNumberMultiply:
     case IrOpcode::kNumberDivide:
+      // (Number, Number) -> Number
+      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+      CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+      // CheckUpperIs(node, Type::Number());
+      break;
     case IrOpcode::kNumberModulus:
       // (Number, Number) -> Number
       CheckValueInputIs(node, 0, Type::Number());
@@ -706,12 +724,12 @@
       break;
     case IrOpcode::kNumberToInt32:
       // Number -> Signed32
-      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
       CheckUpperIs(node, Type::Signed32());
       break;
     case IrOpcode::kNumberToUint32:
       // Number -> Unsigned32
-      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 0, Type::NumberOrUndefined());
       CheckUpperIs(node, Type::Unsigned32());
       break;
     case IrOpcode::kNumberIsHoleNaN:
@@ -719,11 +737,6 @@
       CheckValueInputIs(node, 0, Type::Number());
       CheckUpperIs(node, Type::Boolean());
       break;
-    case IrOpcode::kPlainPrimitiveToNumber:
-      // PlainPrimitive -> Number
-      CheckValueInputIs(node, 0, Type::PlainPrimitive());
-      CheckUpperIs(node, Type::Number());
-      break;
     case IrOpcode::kStringEqual:
     case IrOpcode::kStringLessThan:
     case IrOpcode::kStringLessThanOrEqual:
@@ -743,9 +756,11 @@
       CheckUpperIs(node, Type::Boolean());
       break;
     }
+    case IrOpcode::kObjectIsCallable:
     case IrOpcode::kObjectIsNumber:
     case IrOpcode::kObjectIsReceiver:
     case IrOpcode::kObjectIsSmi:
+    case IrOpcode::kObjectIsString:
     case IrOpcode::kObjectIsUndetectable:
       CheckValueInputIs(node, 0, Type::Any());
       CheckUpperIs(node, Type::Boolean());
@@ -755,6 +770,15 @@
       CheckUpperIs(node, Type::TaggedPointer());
       break;
 
+    case IrOpcode::kChangeTaggedSignedToInt32: {
+      // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
+      // Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
     case IrOpcode::kChangeTaggedToInt32: {
       // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
       // TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -782,6 +806,15 @@
       // CheckUpperIs(node, to));
       break;
     }
+    case IrOpcode::kChangeInt31ToTaggedSigned: {
+      // Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Signed31(), Type::UntaggedInt32());
+      // Type* to = Type::Intersect(Type::Signed31(), Type::Tagged());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
     case IrOpcode::kChangeInt32ToTagged: {
       // Signed32 /\ UntaggedInt32 -> Signed32 /\ Tagged
       // TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -809,7 +842,7 @@
       // CheckUpperIs(node, to));
       break;
     }
-    case IrOpcode::kChangeBoolToBit: {
+    case IrOpcode::kChangeTaggedToBit: {
       // Boolean /\ TaggedPtr -> Boolean /\ UntaggedInt1
       // TODO(neis): Activate once ChangeRepresentation works in typer.
       // Type* from = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
@@ -818,7 +851,7 @@
       // CheckUpperIs(node, to));
       break;
     }
-    case IrOpcode::kChangeBitToBool: {
+    case IrOpcode::kChangeBitToTagged: {
       // Boolean /\ UntaggedInt1 -> Boolean /\ TaggedPtr
       // TODO(neis): Activate once ChangeRepresentation works in typer.
       // Type* from = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
@@ -827,6 +860,15 @@
       // CheckUpperIs(node, to));
       break;
     }
+    case IrOpcode::kTruncateTaggedToWord32: {
+      // Number /\ Tagged -> Signed32 /\ UntaggedInt32
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Number(), Type::Tagged());
+      // Type* to = Type::Intersect(Type::Number(), Type::UntaggedInt32());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
 
     case IrOpcode::kLoadField:
       // Object -> fieldtype
@@ -918,6 +960,7 @@
     case IrOpcode::kUint64LessThanOrEqual:
     case IrOpcode::kFloat32Add:
     case IrOpcode::kFloat32Sub:
+    case IrOpcode::kFloat32SubPreserveNan:
     case IrOpcode::kFloat32Mul:
     case IrOpcode::kFloat32Div:
     case IrOpcode::kFloat32Max:
@@ -929,6 +972,7 @@
     case IrOpcode::kFloat32LessThanOrEqual:
     case IrOpcode::kFloat64Add:
     case IrOpcode::kFloat64Sub:
+    case IrOpcode::kFloat64SubPreserveNan:
     case IrOpcode::kFloat64Mul:
     case IrOpcode::kFloat64Div:
     case IrOpcode::kFloat64Mod:
@@ -949,6 +993,7 @@
     case IrOpcode::kFloat64LessThan:
     case IrOpcode::kFloat64LessThanOrEqual:
     case IrOpcode::kTruncateInt64ToInt32:
+    case IrOpcode::kRoundFloat64ToInt32:
     case IrOpcode::kRoundInt32ToFloat32:
     case IrOpcode::kRoundInt64ToFloat32:
     case IrOpcode::kRoundInt64ToFloat64:
@@ -956,11 +1001,12 @@
     case IrOpcode::kRoundUint64ToFloat64:
     case IrOpcode::kRoundUint64ToFloat32:
     case IrOpcode::kTruncateFloat64ToFloat32:
-    case IrOpcode::kTruncateFloat64ToInt32:
+    case IrOpcode::kTruncateFloat64ToWord32:
     case IrOpcode::kBitcastFloat32ToInt32:
     case IrOpcode::kBitcastFloat64ToInt64:
     case IrOpcode::kBitcastInt32ToFloat32:
     case IrOpcode::kBitcastInt64ToFloat64:
+    case IrOpcode::kBitcastWordToTagged:
     case IrOpcode::kChangeInt32ToInt64:
     case IrOpcode::kChangeUint32ToUint64:
     case IrOpcode::kChangeInt32ToFloat64:
@@ -990,17 +1036,23 @@
     case IrOpcode::kLoadParentFramePointer:
     case IrOpcode::kCheckedLoad:
     case IrOpcode::kCheckedStore:
+    case IrOpcode::kAtomicLoad:
+    case IrOpcode::kAtomicStore:
+
+#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
+      MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
+#undef SIMD_MACHINE_OP_CASE
+
       // TODO(rossberg): Check.
       break;
   }
 }  // NOLINT(readability/fn_size)
 
-
-void Verifier::Run(Graph* graph, Typing typing) {
+void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
   CHECK_NOT_NULL(graph->start());
   CHECK_NOT_NULL(graph->end());
   Zone zone(graph->zone()->allocator());
-  Visitor visitor(&zone, typing);
+  Visitor visitor(&zone, typing, check_inputs);
   AllNodes all(&zone, graph);
   for (Node* node : all.live) visitor.Check(node);
 
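The verifier's new kValuesOnly mode exists for pipeline phases that leave effect and control edges in flux: it checks value, context, and frame-state inputs but skips the effect/control wiring that kAll enforces. The input-count arithmetic above, restated as a tiny function:

enum CheckInputs { kValuesOnly, kAll };

struct InputCounts {
  int value, context, frame_state, effect, control;
};

// Mirrors the expected-input computation in Verifier::Visitor::Check.
int ExpectedInputCount(const InputCounts& c, CheckInputs mode) {
  int n = c.value + c.context + c.frame_state;
  if (mode == kAll) n += c.effect + c.control;
  return n;
}
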
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
index 428558d..60849e0 100644
--- a/src/compiler/verifier.h
+++ b/src/compiler/verifier.h
@@ -21,8 +21,10 @@
 class Verifier {
  public:
   enum Typing { TYPED, UNTYPED };
+  enum CheckInputs { kValuesOnly, kAll };
 
-  static void Run(Graph* graph, Typing typing = TYPED);
+  static void Run(Graph* graph, Typing typing = TYPED,
+                  CheckInputs check_inputs = kAll);
 
 #ifdef DEBUG
   // Verifies consistency of node inputs and uses:
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 93d5a08..619e639 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -10,11 +10,10 @@
 #include "src/base/platform/platform.h"
 
 #include "src/compiler/access-builder.h"
-#include "src/compiler/change-lowering.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/diamond.h"
-#include "src/compiler/graph.h"
 #include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/int64-lowering.h"
 #include "src/compiler/js-generic-lowering.h"
@@ -24,10 +23,8 @@
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/pipeline.h"
-#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-operator.h"
 #include "src/compiler/source-position.h"
-#include "src/compiler/typer.h"
+#include "src/compiler/zone-pool.h"
 
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
@@ -52,17 +49,11 @@
 
 namespace {
 const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
-  if (wasm::WasmOpcodes::IsSupported(opcode)) {
-    V8_Fatal(__FILE__, __LINE__,
-             "Unsupported opcode #%d:%s reported as supported", opcode,
-             wasm::WasmOpcodes::OpcodeName(opcode));
-  }
   V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
            wasm::WasmOpcodes::OpcodeName(opcode));
   return nullptr;
 }
 
-
 void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
   Graph* g = jsgraph->graph();
   if (g->end()) {
@@ -83,62 +74,72 @@
   explicit WasmTrapHelper(WasmGraphBuilder* builder)
       : builder_(builder),
         jsgraph_(builder->jsgraph()),
-        graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
-    for (int i = 0; i < wasm::kTrapCount; i++) traps_[i] = nullptr;
-  }
+        graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {}
 
   // Make the current control path trap to unreachable.
-  void Unreachable() { ConnectTrap(wasm::kTrapUnreachable); }
+  void Unreachable(wasm::WasmCodePosition position) {
+    ConnectTrap(wasm::kTrapUnreachable, position);
+  }
 
   // Always trap with the given reason.
-  void TrapAlways(wasm::TrapReason reason) { ConnectTrap(reason); }
+  void TrapAlways(wasm::TrapReason reason, wasm::WasmCodePosition position) {
+    ConnectTrap(reason, position);
+  }
 
   // Add a check that traps if {node} is equal to {val}.
-  Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val) {
+  Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val,
+                   wasm::WasmCodePosition position) {
     Int32Matcher m(node);
     if (m.HasValue() && !m.Is(val)) return graph()->start();
     if (val == 0) {
-      AddTrapIfFalse(reason, node);
+      AddTrapIfFalse(reason, node, position);
     } else {
       AddTrapIfTrue(reason,
                     graph()->NewNode(jsgraph()->machine()->Word32Equal(), node,
-                                     jsgraph()->Int32Constant(val)));
+                                     jsgraph()->Int32Constant(val)),
+                    position);
     }
     return builder_->Control();
   }
 
   // Add a check that traps if {node} is zero.
-  Node* ZeroCheck32(wasm::TrapReason reason, Node* node) {
-    return TrapIfEq32(reason, node, 0);
+  Node* ZeroCheck32(wasm::TrapReason reason, Node* node,
+                    wasm::WasmCodePosition position) {
+    return TrapIfEq32(reason, node, 0, position);
   }
 
   // Add a check that traps if {node} is equal to {val}.
-  Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val) {
+  Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val,
+                   wasm::WasmCodePosition position) {
     Int64Matcher m(node);
     if (m.HasValue() && !m.Is(val)) return graph()->start();
-    AddTrapIfTrue(reason,
-                  graph()->NewNode(jsgraph()->machine()->Word64Equal(), node,
-                                   jsgraph()->Int64Constant(val)));
+    AddTrapIfTrue(reason, graph()->NewNode(jsgraph()->machine()->Word64Equal(),
+                                           node, jsgraph()->Int64Constant(val)),
+                  position);
     return builder_->Control();
   }
 
   // Add a check that traps if {node} is zero.
-  Node* ZeroCheck64(wasm::TrapReason reason, Node* node) {
-    return TrapIfEq64(reason, node, 0);
+  Node* ZeroCheck64(wasm::TrapReason reason, Node* node,
+                    wasm::WasmCodePosition position) {
+    return TrapIfEq64(reason, node, 0, position);
   }
 
   // Add a trap if {cond} is true.
-  void AddTrapIfTrue(wasm::TrapReason reason, Node* cond) {
-    AddTrapIf(reason, cond, true);
+  void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
+                     wasm::WasmCodePosition position) {
+    AddTrapIf(reason, cond, true, position);
   }
 
   // Add a trap if {cond} is false.
-  void AddTrapIfFalse(wasm::TrapReason reason, Node* cond) {
-    AddTrapIf(reason, cond, false);
+  void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
+                      wasm::WasmCodePosition position) {
+    AddTrapIf(reason, cond, false, position);
   }
 
   // Add a trap if {cond} is true or false according to {iftrue}.
-  void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue) {
+  void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
+                 wasm::WasmCodePosition position) {
     Node** effect_ptr = builder_->effect_;
     Node** control_ptr = builder_->control_;
     Node* before = *effect_ptr;
@@ -148,7 +149,7 @@
     Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
 
     *control_ptr = iftrue ? if_true : if_false;
-    ConnectTrap(reason);
+    ConnectTrap(reason, position);
     *control_ptr = iftrue ? if_false : if_true;
     *effect_ptr = before;
   }
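
Every trap helper now threads a wasm::WasmCodePosition through to ConnectTrap, so the eventual runtime call can report where a trap happened, not just why. The added plumbing reduces to this pattern (illustrative types; the sentinel value is an assumption):

#include <cassert>

using WasmCodePosition = int;  // assumed: byte offset within the function
constexpr WasmCodePosition kNoCodePosition = -1;

struct TrapSite {
  int reason_message_id;   // from TrapReasonToMessageId(reason)
  WasmCodePosition where;  // recorded per trapping instruction
};

TrapSite ConnectTrapSketch(int reason_message_id, WasmCodePosition position) {
  assert(position != kNoCodePosition);  // mirrors the DCHECK in ConnectTrap
  return TrapSite{reason_message_id, position};
}

On second and later uses, the helper below appends the reason and position as new inputs to shared phis on a single merged trap block, replacing the old one-block-per-TrapReason array.
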
@@ -179,49 +180,69 @@
   WasmGraphBuilder* builder_;
   JSGraph* jsgraph_;
   Graph* graph_;
-  Node* traps_[wasm::kTrapCount];
-  Node* effects_[wasm::kTrapCount];
+  Node* trap_merge_ = nullptr;
+  Node* trap_effect_;
+  Node* trap_reason_;
+  Node* trap_position_;
 
   JSGraph* jsgraph() { return jsgraph_; }
   Graph* graph() { return jsgraph_->graph(); }
   CommonOperatorBuilder* common() { return jsgraph()->common(); }
 
-  void ConnectTrap(wasm::TrapReason reason) {
-    if (traps_[reason] == nullptr) {
-      // Create trap code for the first time this trap is used.
-      return BuildTrapCode(reason);
+  void ConnectTrap(wasm::TrapReason reason, wasm::WasmCodePosition position) {
+    DCHECK(position != wasm::kNoCodePosition);
+    Node* reason_node = builder_->Int32Constant(
+        wasm::WasmOpcodes::TrapReasonToMessageId(reason));
+    Node* position_node = builder_->Int32Constant(position);
+    if (trap_merge_ == nullptr) {
+      // Create trap code for the first time.
+      return BuildTrapCode(reason_node, position_node);
     }
     // Connect the current control and effect to the existing trap code.
-    builder_->AppendToMerge(traps_[reason], builder_->Control());
-    builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
+    builder_->AppendToMerge(trap_merge_, builder_->Control());
+    builder_->AppendToPhi(trap_effect_, builder_->Effect());
+    builder_->AppendToPhi(trap_reason_, reason_node);
+    builder_->AppendToPhi(trap_position_, position_node);
   }
 
-  void BuildTrapCode(wasm::TrapReason reason) {
-    Node* exception =
-        builder_->String(wasm::WasmOpcodes::TrapReasonName(reason));
+  void BuildTrapCode(Node* reason_node, Node* position_node) {
     Node* end;
     Node** control_ptr = builder_->control_;
     Node** effect_ptr = builder_->effect_;
     wasm::ModuleEnv* module = builder_->module_;
-    *control_ptr = traps_[reason] =
+    DCHECK(trap_merge_ == NULL);
+    *control_ptr = trap_merge_ =
         graph()->NewNode(common()->Merge(1), *control_ptr);
-    *effect_ptr = effects_[reason] =
+    *effect_ptr = trap_effect_ =
         graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
+    trap_reason_ =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
+                         reason_node, *control_ptr);
+    trap_position_ =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
+                         position_node, *control_ptr);
+
+    Node* trap_reason_smi = builder_->BuildChangeInt32ToSmi(trap_reason_);
+    Node* trap_position_smi = builder_->BuildChangeInt32ToSmi(trap_position_);
 
     if (module && !module->instance->context.is_null()) {
       // Use the module context to call the runtime to throw an exception.
-      Runtime::FunctionId f = Runtime::kThrow;
+      Runtime::FunctionId f = Runtime::kThrowWasmError;
       const Runtime::Function* fun = Runtime::FunctionForId(f);
       CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
           jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
           CallDescriptor::kNoFlags);
+      // CEntryStubConstant nodes have to be created and cached in the main
+      // thread. At the moment this is only done for CEntryStubConstant(1).
+      DCHECK_EQ(1, fun->result_size);
       Node* inputs[] = {
           jsgraph()->CEntryStubConstant(fun->result_size),  // C entry
-          exception,                                        // exception
+          trap_reason_smi,                                  // message id
+          trap_position_smi,                                // byte position
           jsgraph()->ExternalConstant(
-              ExternalReference(f, jsgraph()->isolate())),  // ref
-          jsgraph()->Int32Constant(fun->nargs),             // arity
-          jsgraph()->Constant(module->instance->context),   // context
+              ExternalReference(f, jsgraph()->isolate())),    // ref
+          jsgraph()->Int32Constant(fun->nargs),               // arity
+          builder_->HeapConstant(module->instance->context),  // context
           *effect_ptr,
           *control_ptr};
 
@@ -247,8 +268,9 @@
   }
 };
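
Note: the rework above replaces the per-reason trap blocks (the old traps_[kTrapCount] / effects_[kTrapCount] arrays) with a single shared trap exit. Every trap site appends its control edge to one Merge and feeds its trap reason and wasm byte position into two Word32 Phis, so each function ends up with exactly one Runtime::kThrowWasmError call no matter how many checks it contains. A minimal plain-C++ sketch of the resulting shape (not TurboFan code; the enum values and the fprintf are illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    enum TrapReason { kTrapDivByZero, kTrapDivUnrepresentable };

    // Stands in for the single merged trap block: one runtime call,
    // parameterized by phi-like arguments instead of one block per reason.
    [[noreturn]] void SharedTrapExit(TrapReason reason, uint32_t position) {
      std::fprintf(stderr, "wasm trap %d at byte offset %u\n",
                   static_cast<int>(reason), position);
      std::abort();
    }

    int32_t DivS(int32_t left, int32_t right, uint32_t position) {
      if (right == 0) SharedTrapExit(kTrapDivByZero, position);  // ZeroCheck32
      if (right == -1 && left == INT32_MIN)                      // TrapIfEq32
        SharedTrapExit(kTrapDivUnrepresentable, position);
      return left / right;
    }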
 
-WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
-                                   wasm::FunctionSig* function_signature)
+WasmGraphBuilder::WasmGraphBuilder(
+    Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* function_signature,
+    compiler::SourcePositionTable* source_position_table)
     : zone_(zone),
       jsgraph_(jsgraph),
       module_(nullptr),
@@ -260,32 +282,28 @@
       cur_buffer_(def_buffer_),
       cur_bufsize_(kDefaultBufferSize),
       trap_(new (zone) WasmTrapHelper(this)),
-      function_signature_(function_signature) {
+      function_signature_(function_signature),
+      source_position_table_(source_position_table) {
   DCHECK_NOT_NULL(jsgraph_);
 }
 
-
 Node* WasmGraphBuilder::Error() { return jsgraph()->Dead(); }
 
-
 Node* WasmGraphBuilder::Start(unsigned params) {
   Node* start = graph()->NewNode(jsgraph()->common()->Start(params));
   graph()->SetStart(start);
   return start;
 }
 
-
 Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
   return graph()->NewNode(jsgraph()->common()->Parameter(index),
                           graph()->start());
 }
 
-
 Node* WasmGraphBuilder::Loop(Node* entry) {
   return graph()->NewNode(jsgraph()->common()->Loop(1), entry);
 }
 
-
 Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
   Node* terminate =
       graph()->NewNode(jsgraph()->common()->Terminate(), effect, control);
@@ -293,18 +311,15 @@
   return terminate;
 }
 
-
 unsigned WasmGraphBuilder::InputCount(Node* node) {
   return static_cast<unsigned>(node->InputCount());
 }
 
-
 bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
   return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
          NodeProperties::GetControlInput(phi) == merge;
 }
 
-
 void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
   DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
   merge->AppendInput(jsgraph()->zone(), from);
@@ -313,22 +328,18 @@
       merge, jsgraph()->common()->ResizeMergeOrPhi(merge->op(), new_size));
 }
 
-
-void WasmGraphBuilder::AppendToPhi(Node* merge, Node* phi, Node* from) {
+void WasmGraphBuilder::AppendToPhi(Node* phi, Node* from) {
   DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
-  DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
   int new_size = phi->InputCount();
   phi->InsertInput(jsgraph()->zone(), phi->InputCount() - 1, from);
   NodeProperties::ChangeOp(
       phi, jsgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
 }
 
-
 Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
   return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
 }
 
-
 Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
                             Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
@@ -338,7 +349,6 @@
                           buf);
 }
 
-
 Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
                                   Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
@@ -348,19 +358,20 @@
                           buf);
 }
 
+Node* WasmGraphBuilder::NumberConstant(int32_t value) {
+  return jsgraph()->Constant(value);
+}
 
 Node* WasmGraphBuilder::Int32Constant(int32_t value) {
   return jsgraph()->Int32Constant(value);
 }
 
-
 Node* WasmGraphBuilder::Int64Constant(int64_t value) {
   return jsgraph()->Int64Constant(value);
 }
 
-
-Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
-                              Node* right) {
+Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
+                              wasm::WasmCodePosition position) {
   const Operator* op;
   MachineOperatorBuilder* m = jsgraph()->machine();
   switch (opcode) {
@@ -374,13 +385,13 @@
       op = m->Int32Mul();
       break;
     case wasm::kExprI32DivS:
-      return BuildI32DivS(left, right);
+      return BuildI32DivS(left, right, position);
     case wasm::kExprI32DivU:
-      return BuildI32DivU(left, right);
+      return BuildI32DivU(left, right, position);
     case wasm::kExprI32RemS:
-      return BuildI32RemS(left, right);
+      return BuildI32RemS(left, right, position);
     case wasm::kExprI32RemU:
-      return BuildI32RemU(left, right);
+      return BuildI32RemU(left, right, position);
     case wasm::kExprI32And:
       op = m->Word32And();
       break;
@@ -445,62 +456,46 @@
     case wasm::kExprI64And:
       op = m->Word64And();
       break;
-    // todo(ahaas): I added a list of missing instructions here to make merging
-    // easier when I do them one by one.
-    // kExprI64Add:
     case wasm::kExprI64Add:
       op = m->Int64Add();
       break;
-    // kExprI64Sub:
     case wasm::kExprI64Sub:
       op = m->Int64Sub();
       break;
-    // kExprI64Mul:
     case wasm::kExprI64Mul:
       op = m->Int64Mul();
       break;
-    // kExprI64DivS:
     case wasm::kExprI64DivS:
-      return BuildI64DivS(left, right);
-    // kExprI64DivU:
+      return BuildI64DivS(left, right, position);
     case wasm::kExprI64DivU:
-      return BuildI64DivU(left, right);
-    // kExprI64RemS:
+      return BuildI64DivU(left, right, position);
     case wasm::kExprI64RemS:
-      return BuildI64RemS(left, right);
-    // kExprI64RemU:
+      return BuildI64RemS(left, right, position);
     case wasm::kExprI64RemU:
-      return BuildI64RemU(left, right);
+      return BuildI64RemU(left, right, position);
     case wasm::kExprI64Ior:
       op = m->Word64Or();
       break;
-// kExprI64Xor:
     case wasm::kExprI64Xor:
       op = m->Word64Xor();
       break;
-// kExprI64Shl:
     case wasm::kExprI64Shl:
       op = m->Word64Shl();
       right = MaskShiftCount64(right);
       break;
-    // kExprI64ShrU:
     case wasm::kExprI64ShrU:
       op = m->Word64Shr();
       right = MaskShiftCount64(right);
       break;
-    // kExprI64ShrS:
     case wasm::kExprI64ShrS:
       op = m->Word64Sar();
       right = MaskShiftCount64(right);
       break;
-    // kExprI64Eq:
     case wasm::kExprI64Eq:
       op = m->Word64Equal();
       break;
-// kExprI64Ne:
     case wasm::kExprI64Ne:
       return Invert(Binop(wasm::kExprI64Eq, left, right));
-// kExprI64LtS:
     case wasm::kExprI64LtS:
       op = m->Int64LessThan();
       break;
@@ -543,7 +538,7 @@
       op = m->Float32Add();
       break;
     case wasm::kExprF32Sub:
-      op = m->Float32Sub();
+      op = m->Float32SubPreserveNan();
       break;
     case wasm::kExprF32Mul:
       op = m->Float32Mul();
@@ -574,7 +569,7 @@
       op = m->Float64Add();
       break;
     case wasm::kExprF64Sub:
-      op = m->Float64Sub();
+      op = m->Float64SubPreserveNan();
       break;
     case wasm::kExprF64Mul:
       op = m->Float64Mul();
@@ -609,23 +604,38 @@
       return BuildF32Max(left, right);
     case wasm::kExprF64Max:
       return BuildF64Max(left, right);
-    case wasm::kExprF64Pow: {
+    case wasm::kExprF64Pow:
       return BuildF64Pow(left, right);
-    }
-    case wasm::kExprF64Atan2: {
+    case wasm::kExprF64Atan2:
       return BuildF64Atan2(left, right);
-    }
-    case wasm::kExprF64Mod: {
+    case wasm::kExprF64Mod:
       return BuildF64Mod(left, right);
-    }
+    case wasm::kExprI32AsmjsDivS:
+      return BuildI32AsmjsDivS(left, right);
+    case wasm::kExprI32AsmjsDivU:
+      return BuildI32AsmjsDivU(left, right);
+    case wasm::kExprI32AsmjsRemS:
+      return BuildI32AsmjsRemS(left, right);
+    case wasm::kExprI32AsmjsRemU:
+      return BuildI32AsmjsRemU(left, right);
+    case wasm::kExprI32AsmjsStoreMem8:
+      return BuildAsmjsStoreMem(MachineType::Int8(), left, right);
+    case wasm::kExprI32AsmjsStoreMem16:
+      return BuildAsmjsStoreMem(MachineType::Int16(), left, right);
+    case wasm::kExprI32AsmjsStoreMem:
+      return BuildAsmjsStoreMem(MachineType::Int32(), left, right);
+    case wasm::kExprF32AsmjsStoreMem:
+      return BuildAsmjsStoreMem(MachineType::Float32(), left, right);
+    case wasm::kExprF64AsmjsStoreMem:
+      return BuildAsmjsStoreMem(MachineType::Float64(), left, right);
     default:
       op = UnsupportedOpcode(opcode);
   }
   return graph()->NewNode(op, left, right);
 }
 
-
-Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
+Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
+                             wasm::WasmCodePosition position) {
   const Operator* op;
   MachineOperatorBuilder* m = jsgraph()->machine();
   switch (opcode) {
@@ -649,9 +659,13 @@
       op = m->Float64Sqrt();
       break;
     case wasm::kExprI32SConvertF64:
-      return BuildI32SConvertF64(input);
+      return BuildI32SConvertF64(input, position);
     case wasm::kExprI32UConvertF64:
-      return BuildI32UConvertF64(input);
+      return BuildI32UConvertF64(input, position);
+    case wasm::kExprI32AsmjsSConvertF64:
+      return BuildI32AsmjsSConvertF64(input);
+    case wasm::kExprI32AsmjsUConvertF64:
+      return BuildI32AsmjsUConvertF64(input);
     case wasm::kExprF32ConvertF64:
       op = m->TruncateFloat64ToFloat32();
       break;
@@ -668,9 +682,13 @@
       op = m->RoundUint32ToFloat32();
       break;
     case wasm::kExprI32SConvertF32:
-      return BuildI32SConvertF32(input);
+      return BuildI32SConvertF32(input, position);
     case wasm::kExprI32UConvertF32:
-      return BuildI32UConvertF32(input);
+      return BuildI32UConvertF32(input, position);
+    case wasm::kExprI32AsmjsSConvertF32:
+      return BuildI32AsmjsSConvertF32(input);
+    case wasm::kExprI32AsmjsUConvertF32:
+      return BuildI32AsmjsUConvertF32(input);
     case wasm::kExprF64ConvertF32:
       op = m->ChangeFloat32ToFloat64();
       break;
@@ -769,31 +787,24 @@
     case wasm::kExprF64Log: {
       return BuildF64Log(input);
     }
-    // kExprI32ConvertI64:
     case wasm::kExprI32ConvertI64:
       op = m->TruncateInt64ToInt32();
       break;
-    // kExprI64SConvertI32:
     case wasm::kExprI64SConvertI32:
       op = m->ChangeInt32ToInt64();
       break;
-    // kExprI64UConvertI32:
     case wasm::kExprI64UConvertI32:
       op = m->ChangeUint32ToUint64();
       break;
-    // kExprF64ReinterpretI64:
     case wasm::kExprF64ReinterpretI64:
       op = m->BitcastInt64ToFloat64();
       break;
-    // kExprI64ReinterpretF64:
     case wasm::kExprI64ReinterpretF64:
       op = m->BitcastFloat64ToInt64();
       break;
-    // kExprI64Clz:
     case wasm::kExprI64Clz:
       op = m->Word64Clz();
       break;
-    // kExprI64Ctz:
     case wasm::kExprI64Ctz: {
       if (m->Word64Ctz().IsSupported()) {
         op = m->Word64Ctz().op();
@@ -809,7 +820,6 @@
         return BuildI64Ctz(input);
       }
     }
-    // kExprI64Popcnt:
     case wasm::kExprI64Popcnt: {
       if (m->Word64Popcnt().IsSupported()) {
         op = m->Word64Popcnt().op();
@@ -820,7 +830,6 @@
       }
       break;
     }
-    // kExprF32SConvertI64:
     case wasm::kExprI64Eqz:
       op = m->Word64Equal();
       return graph()->NewNode(op, input, jsgraph()->Int64Constant(0));
@@ -830,65 +839,64 @@
       }
       op = m->RoundInt64ToFloat32();
       break;
-    // kExprF32UConvertI64:
     case wasm::kExprF32UConvertI64:
       if (m->Is32()) {
         return BuildF32UConvertI64(input);
       }
       op = m->RoundUint64ToFloat32();
       break;
-    // kExprF64SConvertI64:
     case wasm::kExprF64SConvertI64:
       if (m->Is32()) {
         return BuildF64SConvertI64(input);
       }
       op = m->RoundInt64ToFloat64();
       break;
-    // kExprF64UConvertI64:
     case wasm::kExprF64UConvertI64:
       if (m->Is32()) {
         return BuildF64UConvertI64(input);
       }
       op = m->RoundUint64ToFloat64();
       break;
-// kExprI64SConvertF32:
-    case wasm::kExprI64SConvertF32: {
-      return BuildI64SConvertF32(input);
-    }
-    // kExprI64SConvertF64:
-    case wasm::kExprI64SConvertF64: {
-      return BuildI64SConvertF64(input);
-    }
-    // kExprI64UConvertF32:
-    case wasm::kExprI64UConvertF32: {
-      return BuildI64UConvertF32(input);
-    }
-    // kExprI64UConvertF64:
-    case wasm::kExprI64UConvertF64: {
-      return BuildI64UConvertF64(input);
-    }
+    case wasm::kExprI64SConvertF32:
+      return BuildI64SConvertF32(input, position);
+    case wasm::kExprI64SConvertF64:
+      return BuildI64SConvertF64(input, position);
+    case wasm::kExprI64UConvertF32:
+      return BuildI64UConvertF32(input, position);
+    case wasm::kExprI64UConvertF64:
+      return BuildI64UConvertF64(input, position);
+    case wasm::kExprI32AsmjsLoadMem8S:
+      return BuildAsmjsLoadMem(MachineType::Int8(), input);
+    case wasm::kExprI32AsmjsLoadMem8U:
+      return BuildAsmjsLoadMem(MachineType::Uint8(), input);
+    case wasm::kExprI32AsmjsLoadMem16S:
+      return BuildAsmjsLoadMem(MachineType::Int16(), input);
+    case wasm::kExprI32AsmjsLoadMem16U:
+      return BuildAsmjsLoadMem(MachineType::Uint16(), input);
+    case wasm::kExprI32AsmjsLoadMem:
+      return BuildAsmjsLoadMem(MachineType::Int32(), input);
+    case wasm::kExprF32AsmjsLoadMem:
+      return BuildAsmjsLoadMem(MachineType::Float32(), input);
+    case wasm::kExprF64AsmjsLoadMem:
+      return BuildAsmjsLoadMem(MachineType::Float64(), input);
     default:
       op = UnsupportedOpcode(opcode);
   }
   return graph()->NewNode(op, input);
 }
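
Note: the new kExpr*Asmjs* opcodes in Binop and Unop split asm.js behavior out of the shared paths, so the builders no longer need module_->asm_js() checks. Where wasm proper traps, asm.js keeps JavaScript typed-array semantics: an out-of-bounds load coerces to 0 (or NaN for floats) and an out-of-bounds store is silently dropped. A sketch of the load semantics in plain C++ (the helper name and heap representation are illustrative):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    int32_t AsmjsLoadMemI32(const std::vector<uint8_t>& heap, uint32_t index) {
      // Out of bounds: no trap, the JS coercion path yields 0.
      if (heap.size() < sizeof(int32_t) ||
          index > heap.size() - sizeof(int32_t)) {
        return 0;
      }
      int32_t value;
      std::memcpy(&value, heap.data() + index, sizeof value);
      return value;
    }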
 
-
 Node* WasmGraphBuilder::Float32Constant(float value) {
   return jsgraph()->Float32Constant(value);
 }
 
-
 Node* WasmGraphBuilder::Float64Constant(double value) {
   return jsgraph()->Float64Constant(value);
 }
 
-
-Node* WasmGraphBuilder::Constant(Handle<Object> value) {
-  return jsgraph()->Constant(value);
+Node* WasmGraphBuilder::HeapConstant(Handle<HeapObject> value) {
+  return jsgraph()->HeapConstant(value);
 }
 
-
 Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
                                Node** false_node) {
   DCHECK_NOT_NULL(cond);
@@ -900,24 +908,20 @@
   return branch;
 }
 
-
 Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
   return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
 }
 
-
 Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
   DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
   return graph()->NewNode(jsgraph()->common()->IfValue(value), sw);
 }
 
-
 Node* WasmGraphBuilder::IfDefault(Node* sw) {
   DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
   return graph()->NewNode(jsgraph()->common()->IfDefault(), sw);
 }
 
-
 Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
   DCHECK_NOT_NULL(*control_);
   DCHECK_NOT_NULL(*effect_);
@@ -937,12 +941,10 @@
   return ret;
 }
 
-
 Node* WasmGraphBuilder::ReturnVoid() { return Return(0, Buffer(0)); }
 
-
-Node* WasmGraphBuilder::Unreachable() {
-  trap_->Unreachable();
+Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
+  trap_->Unreachable(position);
   return nullptr;
 }
 
@@ -987,7 +989,6 @@
   return result;
 }
 
-
 Node* WasmGraphBuilder::BuildF64Neg(Node* input) {
 #if WASM_64
   Node* result =
@@ -1007,7 +1008,6 @@
 #endif
 }
 
-
 Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
   Node* result = Unop(
       wasm::kExprF32ReinterpretI32,
@@ -1020,7 +1020,6 @@
   return result;
 }
 
-
 Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
 #if WASM_64
   Node* result = Unop(
@@ -1049,7 +1048,6 @@
 #endif
 }
 
-
 Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
   Diamond left_le_right(graph(), jsgraph()->common(),
                         Binop(wasm::kExprF32Le, left, right));
@@ -1070,7 +1068,6 @@
               Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
 }
 
-
 Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
   Diamond left_ge_right(graph(), jsgraph()->common(),
                         Binop(wasm::kExprF32Ge, left, right));
@@ -1091,7 +1088,6 @@
               Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
 }
 
-
 Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
   Diamond left_le_right(graph(), jsgraph()->common(),
                         Binop(wasm::kExprF64Le, left, right));
@@ -1112,7 +1108,6 @@
               Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
 }
 
-
 Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
   Diamond left_ge_right(graph(), jsgraph()->common(),
                         Binop(wasm::kExprF64Ge, left, right));
@@ -1133,16 +1128,9 @@
               Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
 }
 
-
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
+                                            wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js must use the wacky JS semantics.
-    input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
-    return graph()->NewNode(
-        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
-  }
-
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
   Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
@@ -1151,19 +1139,14 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF32SConvertI32, result);
   Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
-  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
 
   return result;
 }
 
-
-Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
+                                            wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js must use the wacky JS semantics.
-    return graph()->NewNode(
-        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
-  }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
   Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
@@ -1172,21 +1155,14 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF64SConvertI32, result);
   Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
-  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
 
   return result;
 }
 
-
-Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
+                                            wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js must use the wacky JS semantics.
-    input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
-    return graph()->NewNode(
-        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
-  }
-
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
   Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
@@ -1195,19 +1171,14 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF32UConvertI32, result);
   Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
-  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
 
   return result;
 }
 
-
-Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
+                                            wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js must use the wacky JS semantics.
-    return graph()->NewNode(
-        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
-  }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
   Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
@@ -1216,185 +1187,82 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF64UConvertI32, result);
   Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
-  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
 
   return result;
 }
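
Note: all four BuildI32{S,U}ConvertF{32,64} variants share the same trap detection: truncate the input, convert to integer, convert back, and trap kTrapFloatUnrepresentable if the round trip changed the value (NaN fails the comparison automatically, since NaN != NaN). The same predicate in scalar C++, with explicit range guards because the raw out-of-range cast that the machine instruction performs is undefined behavior in C++:

    #include <cmath>
    #include <cstdint>

    bool I32SConvertF64WouldTrap(double input) {
      double trunc = std::trunc(input);                  // F64Trunc
      if (std::isnan(trunc)) return true;                // NaN != NaN
      if (trunc < -2147483648.0 || trunc > 2147483647.0) return true;
      // In range, the int32 round trip is exact, mirroring the graph's
      // ChangeFloat64ToInt32 / F64SConvertI32 / F64Ne sequence.
      return static_cast<double>(static_cast<int32_t>(trunc)) != trunc;
    }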
 
+Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js must use the wacky JS semantics.
+  input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+  return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsSConvertF64(Node* input) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js must use the wacky JS semantics.
+  return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsUConvertF32(Node* input) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js must use the wacky JS semantics.
+  input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+  return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsUConvertF64(Node* input) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js must use the wacky JS semantics.
+  return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
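
Note: the asm.js conversions above use TruncateFloat64ToWord32, which implements ECMAScript ToInt32: nothing traps, NaN and the infinities become 0, and everything else truncates toward zero and wraps modulo 2^32. As plain C++:

    #include <cmath>
    #include <cstdint>

    int32_t JsToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double t = std::trunc(d);
      double m = std::fmod(t, 4294967296.0);  // wrap modulo 2^32; fmod is exact
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }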
+
+Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
+                                             MachineRepresentation input_type) {
+  Node* stack_slot_param =
+      graph()->NewNode(jsgraph()->machine()->StackSlot(input_type));
+
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(input_type, kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       input, *effect_, *control_);
+
+  MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 1);
+  sig_builder.AddReturn(MachineType::Int32());
+  sig_builder.AddParam(MachineType::Pointer());
+
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node* args[] = {function, stack_slot_param};
+
+  return BuildCCall(sig_builder.Build(), args);
+}
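
Note: BuildBitCountingCall spells out the calling convention for these helpers. The operand is spilled to a stack slot, and a plain C function, reached through an ExternalReference, takes a pointer to it and returns the count as int32. A sketch of what such a callee looks like (hypothetical name; the real implementations live behind V8's external-reference table):

    #include <cstdint>

    extern "C" uint32_t wasm_word32_ctz_sketch(const uint32_t* input) {
      uint32_t v = *input;
      if (v == 0) return 32;        // ctz(0) is the full bit width
      uint32_t n = 0;
      while ((v & 1) == 0) {        // strip trailing zero bits
        v >>= 1;
        ++n;
      }
      return n;
    }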
 
 Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
-  //// Implement the following code as TF graph.
-  // value = value | (value << 1);
-  // value = value | (value << 2);
-  // value = value | (value << 4);
-  // value = value | (value << 8);
-  // value = value | (value << 16);
-  // return CountPopulation32(0xffffffff XOR value);
-
-  Node* result =
-      Binop(wasm::kExprI32Ior, input,
-            Binop(wasm::kExprI32Shl, input, jsgraph()->Int32Constant(1)));
-
-  result = Binop(wasm::kExprI32Ior, result,
-                 Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(2)));
-
-  result = Binop(wasm::kExprI32Ior, result,
-                 Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(4)));
-
-  result = Binop(wasm::kExprI32Ior, result,
-                 Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(8)));
-
-  result =
-      Binop(wasm::kExprI32Ior, result,
-            Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(16)));
-
-  result = BuildI32Popcnt(
-      Binop(wasm::kExprI32Xor, jsgraph()->Int32Constant(0xffffffff), result));
-
-  return result;
+  return BuildBitCountingCall(
+      input, ExternalReference::wasm_word32_ctz(jsgraph()->isolate()),
+      MachineRepresentation::kWord32);
 }
 
-
 Node* WasmGraphBuilder::BuildI64Ctz(Node* input) {
-  //// Implement the following code as TF graph.
-  // value = value | (value << 1);
-  // value = value | (value << 2);
-  // value = value | (value << 4);
-  // value = value | (value << 8);
-  // value = value | (value << 16);
-  // value = value | (value << 32);
-  // return CountPopulation64(0xffffffffffffffff XOR value);
-
-  Node* result =
-      Binop(wasm::kExprI64Ior, input,
-            Binop(wasm::kExprI64Shl, input, jsgraph()->Int64Constant(1)));
-
-  result = Binop(wasm::kExprI64Ior, result,
-                 Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(2)));
-
-  result = Binop(wasm::kExprI64Ior, result,
-                 Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(4)));
-
-  result = Binop(wasm::kExprI64Ior, result,
-                 Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(8)));
-
-  result =
-      Binop(wasm::kExprI64Ior, result,
-            Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(16)));
-
-  result =
-      Binop(wasm::kExprI64Ior, result,
-            Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(32)));
-
-  result = BuildI64Popcnt(Binop(
-      wasm::kExprI64Xor, jsgraph()->Int64Constant(0xffffffffffffffff), result));
-
-  return result;
+  return Unop(wasm::kExprI64UConvertI32,
+              BuildBitCountingCall(input, ExternalReference::wasm_word64_ctz(
+                                              jsgraph()->isolate()),
+                                   MachineRepresentation::kWord64));
 }
 
-
 Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
-  //// Implement the following code as a TF graph.
-  // value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
-  // value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
-  // value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
-  // value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
-  // value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
-
-  Node* result = Binop(
-      wasm::kExprI32Add,
-      Binop(wasm::kExprI32And,
-            Binop(wasm::kExprI32ShrU, input, jsgraph()->Int32Constant(1)),
-            jsgraph()->Int32Constant(0x55555555)),
-      Binop(wasm::kExprI32And, input, jsgraph()->Int32Constant(0x55555555)));
-
-  result = Binop(
-      wasm::kExprI32Add,
-      Binop(wasm::kExprI32And,
-            Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(2)),
-            jsgraph()->Int32Constant(0x33333333)),
-      Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x33333333)));
-
-  result = Binop(
-      wasm::kExprI32Add,
-      Binop(wasm::kExprI32And,
-            Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(4)),
-            jsgraph()->Int32Constant(0x0f0f0f0f)),
-      Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0f0f0f0f)));
-
-  result = Binop(
-      wasm::kExprI32Add,
-      Binop(wasm::kExprI32And,
-            Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(8)),
-            jsgraph()->Int32Constant(0x00ff00ff)),
-      Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x00ff00ff)));
-
-  result = Binop(
-      wasm::kExprI32Add,
-      Binop(wasm::kExprI32And,
-            Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(16)),
-            jsgraph()->Int32Constant(0x0000ffff)),
-      Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0000ffff)));
-
-  return result;
+  return BuildBitCountingCall(
+      input, ExternalReference::wasm_word32_popcnt(jsgraph()->isolate()),
+      MachineRepresentation::kWord32);
 }
 
-
 Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
-  //// Implement the following code as a TF graph.
-  // value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
-  // value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
-  // value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
-  // value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
-  // value = ((value >> 16) & 0x0000ffff0000ffff) + (value &
-  // 0x0000ffff0000ffff);
-  // value = ((value >> 32) & 0x00000000ffffffff) + (value &
-  // 0x00000000ffffffff);
-
-  Node* result =
-      Binop(wasm::kExprI64Add,
-            Binop(wasm::kExprI64And,
-                  Binop(wasm::kExprI64ShrU, input, jsgraph()->Int64Constant(1)),
-                  jsgraph()->Int64Constant(0x5555555555555555)),
-            Binop(wasm::kExprI64And, input,
-                  jsgraph()->Int64Constant(0x5555555555555555)));
-
-  result = Binop(wasm::kExprI64Add,
-                 Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
-                                                jsgraph()->Int64Constant(2)),
-                       jsgraph()->Int64Constant(0x3333333333333333)),
-                 Binop(wasm::kExprI64And, result,
-                       jsgraph()->Int64Constant(0x3333333333333333)));
-
-  result = Binop(wasm::kExprI64Add,
-                 Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
-                                                jsgraph()->Int64Constant(4)),
-                       jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)),
-                 Binop(wasm::kExprI64And, result,
-                       jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)));
-
-  result = Binop(wasm::kExprI64Add,
-                 Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
-                                                jsgraph()->Int64Constant(8)),
-                       jsgraph()->Int64Constant(0x00ff00ff00ff00ff)),
-                 Binop(wasm::kExprI64And, result,
-                       jsgraph()->Int64Constant(0x00ff00ff00ff00ff)));
-
-  result = Binop(wasm::kExprI64Add,
-                 Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
-                                                jsgraph()->Int64Constant(16)),
-                       jsgraph()->Int64Constant(0x0000ffff0000ffff)),
-                 Binop(wasm::kExprI64And, result,
-                       jsgraph()->Int64Constant(0x0000ffff0000ffff)));
-
-  result = Binop(wasm::kExprI64Add,
-                 Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
-                                                jsgraph()->Int64Constant(32)),
-                       jsgraph()->Int64Constant(0x00000000ffffffff)),
-                 Binop(wasm::kExprI64And, result,
-                       jsgraph()->Int64Constant(0x00000000ffffffff)));
-
-  return result;
+  return Unop(wasm::kExprI64UConvertI32,
+              BuildBitCountingCall(input, ExternalReference::wasm_word64_popcnt(
+                                              jsgraph()->isolate()),
+                                   MachineRepresentation::kWord64));
 }
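
Note: the deleted graphs were straight-line encodings of two classic bit tricks, now done out of line: parallel bit counting for popcnt, and smearing the lowest set bit upward so that ctz reduces to a popcount of the complement. For reference, the same algorithms as ordinary C++:

    #include <cstdint>

    uint32_t Popcnt32(uint32_t v) {
      v = ((v >> 1) & 0x55555555u) + (v & 0x55555555u);
      v = ((v >> 2) & 0x33333333u) + (v & 0x33333333u);
      v = ((v >> 4) & 0x0f0f0f0fu) + (v & 0x0f0f0f0fu);
      v = ((v >> 8) & 0x00ff00ffu) + (v & 0x00ff00ffu);
      v = ((v >> 16) & 0x0000ffffu) + (v & 0x0000ffffu);
      return v;
    }

    // The smearing trick from the deleted BuildI32Ctz graph: after the ORs,
    // every bit at or above the lowest set bit is 1, so the complement has
    // exactly the trailing-zero bits set.
    uint32_t Ctz32(uint32_t v) {
      v |= v << 1;
      v |= v << 2;
      v |= v << 4;
      v |= v << 8;
      v |= v << 16;
      return Popcnt32(~v);  // 0xffffffff XOR the smeared value
    }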
 
 Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
@@ -1635,66 +1503,70 @@
   return load;
 }
 
-Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input,
+                                            wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildFloatToIntConversionInstruction(
         input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
-        MachineRepresentation::kFloat32, MachineType::Int64());
+        MachineRepresentation::kFloat32, MachineType::Int64(), position);
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
     Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
     Node* overflow =
         graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
 }
 
-Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input,
+                                            wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildFloatToIntConversionInstruction(
         input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
-        MachineRepresentation::kFloat32, MachineType::Int64());
+        MachineRepresentation::kFloat32, MachineType::Int64(), position);
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
     Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
     Node* overflow =
         graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
 }
 
-Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input,
+                                            wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildFloatToIntConversionInstruction(
         input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
-        MachineRepresentation::kFloat64, MachineType::Int64());
+        MachineRepresentation::kFloat64, MachineType::Int64(), position);
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
     Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
     Node* overflow =
         graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
 }
 
-Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input,
+                                            wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildFloatToIntConversionInstruction(
         input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
-        MachineRepresentation::kFloat64, MachineType::Int64());
+        MachineRepresentation::kFloat64, MachineType::Int64(), position);
   } else {
     Node* trunc = graph()->NewNode(
         jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
     Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
     Node* overflow =
         graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
     return result;
   }
 }
@@ -1702,7 +1574,7 @@
 Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
     Node* input, ExternalReference ref,
     MachineRepresentation parameter_representation,
-    const MachineType result_type) {
+    const MachineType result_type, wasm::WasmCodePosition position) {
   Node* stack_slot_param = graph()->NewNode(
       jsgraph()->machine()->StackSlot(parameter_representation));
   Node* stack_slot_result = graph()->NewNode(
@@ -1719,7 +1591,7 @@
   Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
   Node* args[] = {function, stack_slot_param, stack_slot_result};
   trap_->ZeroCheck32(wasm::kTrapFloatUnrepresentable,
-                     BuildCCall(sig_builder.Build(), args));
+                     BuildCCall(sig_builder.Build(), args), position);
   const Operator* load_op = jsgraph()->machine()->Load(result_type);
   Node* load =
       graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
@@ -1728,37 +1600,10 @@
   return load;
 }
 
-Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js semantics return 0 on divide or mod by zero.
-    if (m->Int32DivIsSafe()) {
-      // The hardware instruction does the right thing (e.g. arm).
-      return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
-    }
-
-    // Check denominator for zero.
-    Diamond z(
-        graph(), jsgraph()->common(),
-        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
-        BranchHint::kFalse);
-
-    // Check numerator for -1. (avoid minint / -1 case).
-    Diamond n(
-        graph(), jsgraph()->common(),
-        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
-        BranchHint::kFalse);
-
-    Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
-    Node* neg =
-        graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
-
-    return n.Phi(MachineRepresentation::kWord32, neg,
-                 z.Phi(MachineRepresentation::kWord32,
-                       jsgraph()->Int32Constant(0), div));
-  }
-
-  trap_->ZeroCheck32(wasm::kTrapDivByZero, right);
+  trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position);
   Node* before = *control_;
   Node* denom_is_m1;
   Node* denom_is_not_m1;
@@ -1766,7 +1611,7 @@
       graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
       &denom_is_m1, &denom_is_not_m1);
   *control_ = denom_is_m1;
-  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt);
+  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
   if (*control_ != denom_is_m1) {
     *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
                                  *control_);
@@ -1776,30 +1621,11 @@
   return graph()->NewNode(m->Int32Div(), left, right, *control_);
 }
 
-Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js semantics return 0 on divide or mod by zero.
-    // Explicit check for x % 0.
-    Diamond z(
-        graph(), jsgraph()->common(),
-        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
-        BranchHint::kFalse);
 
-    // Explicit check for x % -1.
-    Diamond d(
-        graph(), jsgraph()->common(),
-        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
-        BranchHint::kFalse);
-    d.Chain(z.if_false);
-
-    return z.Phi(
-        MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-        d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-              graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
-  }
-
-  trap_->ZeroCheck32(wasm::kTrapRemByZero, right);
+  trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position);
 
   Diamond d(
       graph(), jsgraph()->common(),
@@ -1811,56 +1637,115 @@
                graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
 }
 
-Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js semantics return 0 on divide or mod by zero.
-    if (m->Uint32DivIsSafe()) {
-      // The hardware instruction does the right thing (e.g. arm).
-      return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
-    }
-
-    // Explicit check for x % 0.
-    Diamond z(
-        graph(), jsgraph()->common(),
-        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
-        BranchHint::kFalse);
-
-    return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-                 graph()->NewNode(jsgraph()->machine()->Uint32Div(), left,
-                                  right, z.if_false));
-  }
-  return graph()->NewNode(m->Uint32Div(), left, right,
-                          trap_->ZeroCheck32(wasm::kTrapDivByZero, right));
+  return graph()->NewNode(
+      m->Uint32Div(), left, right,
+      trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position));
 }
 
-Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js()) {
-    // asm.js semantics return 0 on divide or mod by zero.
-    // Explicit check for x % 0.
-    Diamond z(
-        graph(), jsgraph()->common(),
-        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
-        BranchHint::kFalse);
-
-    Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
-                                 z.if_false);
-    return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-                 rem);
-  }
-
-  return graph()->NewNode(m->Uint32Mod(), left, right,
-                          trap_->ZeroCheck32(wasm::kTrapRemByZero, right));
+  return graph()->NewNode(
+      m->Uint32Mod(), left, right,
+      trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position));
 }
 
-Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js semantics return 0 on divide or mod by zero.
+  if (m->Int32DivIsSafe()) {
+    // The hardware instruction does the right thing (e.g. arm).
+    return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
+  }
+
+  // Check denominator for zero.
+  Diamond z(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+      BranchHint::kFalse);
+
+  // Check the denominator for -1 (to avoid the kMinInt / -1 overflow case).
+  Diamond n(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+      BranchHint::kFalse);
+
+  Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
+  Node* neg =
+      graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+
+  return n.Phi(
+      MachineRepresentation::kWord32, neg,
+      z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0), div));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js semantics return 0 on divide or mod by zero.
+  // Explicit check for x % 0.
+  Diamond z(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+      BranchHint::kFalse);
+
+  // Explicit check for x % -1.
+  Diamond d(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+      BranchHint::kFalse);
+  d.Chain(z.if_false);
+
+  return z.Phi(
+      MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+      d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+            graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js semantics return 0 on divide or mod by zero.
+  if (m->Uint32DivIsSafe()) {
+    // The hardware instruction does the right thing (e.g. arm).
+    return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
+  }
+
+  // Explicit check for x / 0.
+  Diamond z(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+      BranchHint::kFalse);
+
+  return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+               graph()->NewNode(jsgraph()->machine()->Uint32Div(), left, right,
+                                z.if_false));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsRemU(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  // asm.js semantics return 0 on divide or mod by zero.
+  // Explicit check for x % 0.
+  Diamond z(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+      BranchHint::kFalse);
+
+  Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
+                               z.if_false);
+  return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+               rem);
+}
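
Note: the four helpers above capture asm.js integer division, where nothing traps: division or remainder by zero produces 0, and kMinInt / -1 wraps instead of trapping kTrapDivUnrepresentable. The same semantics as scalar C++ (the unsigned negation sidesteps C++'s undefined behavior on INT32_MIN):

    #include <cstdint>

    int32_t AsmjsDivS(int32_t a, int32_t b) {
      if (b == 0) return 0;                                       // no trap
      if (b == -1) return static_cast<int32_t>(-static_cast<uint32_t>(a));
      return a / b;
    }

    int32_t AsmjsRemS(int32_t a, int32_t b) {
      if (b == 0 || b == -1) return 0;  // x % -1 is 0; avoids kMinInt % -1
      return a % b;
    }

    uint32_t AsmjsDivU(uint32_t a, uint32_t b) { return b == 0 ? 0 : a / b; }
    uint32_t AsmjsRemU(uint32_t a, uint32_t b) { return b == 0 ? 0 : a % b; }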
+
+Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildDiv64Call(
         left, right, ExternalReference::wasm_int64_div(jsgraph()->isolate()),
-        MachineType::Int64(), wasm::kTrapDivByZero);
+        MachineType::Int64(), wasm::kTrapDivByZero, position);
   }
-  trap_->ZeroCheck64(wasm::kTrapDivByZero, right);
+  trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position);
   Node* before = *control_;
   Node* denom_is_m1;
   Node* denom_is_not_m1;
@@ -1869,7 +1754,7 @@
          &denom_is_m1, &denom_is_not_m1);
   *control_ = denom_is_m1;
   trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
-                    std::numeric_limits<int64_t>::min());
+                    std::numeric_limits<int64_t>::min(), position);
   if (*control_ != denom_is_m1) {
     *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
                                  *control_);
@@ -1880,13 +1765,14 @@
                           *control_);
 }
 
-Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildDiv64Call(
         left, right, ExternalReference::wasm_int64_mod(jsgraph()->isolate()),
-        MachineType::Int64(), wasm::kTrapRemByZero);
+        MachineType::Int64(), wasm::kTrapRemByZero, position);
   }
-  trap_->ZeroCheck64(wasm::kTrapRemByZero, right);
+  trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position);
   Diamond d(jsgraph()->graph(), jsgraph()->common(),
             graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
                              jsgraph()->Int64Constant(-1)));
@@ -1898,28 +1784,33 @@
                rem);
 }
 
-Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildDiv64Call(
         left, right, ExternalReference::wasm_uint64_div(jsgraph()->isolate()),
-        MachineType::Int64(), wasm::kTrapDivByZero);
+        MachineType::Int64(), wasm::kTrapDivByZero, position);
   }
-  return graph()->NewNode(jsgraph()->machine()->Uint64Div(), left, right,
-                          trap_->ZeroCheck64(wasm::kTrapDivByZero, right));
+  return graph()->NewNode(
+      jsgraph()->machine()->Uint64Div(), left, right,
+      trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position));
 }
-Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
+                                     wasm::WasmCodePosition position) {
   if (jsgraph()->machine()->Is32()) {
     return BuildDiv64Call(
         left, right, ExternalReference::wasm_uint64_mod(jsgraph()->isolate()),
-        MachineType::Int64(), wasm::kTrapRemByZero);
+        MachineType::Int64(), wasm::kTrapRemByZero, position);
   }
-  return graph()->NewNode(jsgraph()->machine()->Uint64Mod(), left, right,
-                          trap_->ZeroCheck64(wasm::kTrapRemByZero, right));
+  return graph()->NewNode(
+      jsgraph()->machine()->Uint64Mod(), left, right,
+      trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position));
 }
 
 Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
                                        ExternalReference ref,
-                                       MachineType result_type, int trap_zero) {
+                                       MachineType result_type, int trap_zero,
+                                       wasm::WasmCodePosition position) {
   Node* stack_slot_dst = graph()->NewNode(
       jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
   Node* stack_slot_src = graph()->NewNode(
@@ -1946,8 +1837,8 @@
 
   // TODO(wasm): This can get simpler if we have a specialized runtime call to
   // throw WASM exceptions by trap code instead of by string.
-  trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call);
-  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1);
+  trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
+  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
   const Operator* load_op = jsgraph()->machine()->Load(result_type);
   Node* load =
       graph()->NewNode(load_op, stack_slot_dst, jsgraph()->Int32Constant(0),
@@ -1977,7 +1868,8 @@
   return call;
 }
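
Note: on 32-bit targets the i64 division helpers call out to C, and BuildDiv64Call decodes the callee's status word with the two trap checks above: a return value of 0 means division by zero and -1 means the kMinInt / -1 overflow, while the quotient itself travels through the destination stack slot. A sketch of a callee honoring that protocol (the name and slot layout are assumptions; the builder body is only partially shown in this hunk):

    #include <cstdint>
    #include <limits>

    extern "C" int32_t wasm_int64_div_sketch(int64_t* dst, const int64_t* src) {
      int64_t a = *dst, b = *src;  // assumed layout: dividend in the dst slot
      if (b == 0) return 0;        // caller turns this into kTrapDivByZero
      if (a == std::numeric_limits<int64_t>::min() && b == -1)
        return -1;                 // caller traps kTrapDivUnrepresentable
      *dst = a / b;
      return 1;                    // success
    }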
 
-Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+                                      wasm::WasmCodePosition position) {
   const size_t params = sig->parameter_count();
   const size_t extra = 2;  // effect and control inputs.
   const size_t count = 1 + params + extra;
@@ -1993,32 +1885,36 @@
       wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
   const Operator* op = jsgraph()->common()->Call(descriptor);
   Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+  SetSourcePosition(call, position);
 
   *effect_ = call;
   return call;
 }
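
Note: SetSourcePosition is the payoff of the position-threading exercise: each call node is recorded in the SourcePositionTable passed to the constructor, keyed by IR node, so the finished code object can map a PC back to a wasm byte offset for stack traces. Trap positions travel separately, as the runtime-call argument built in ConnectTrap above. A toy model of such a side table (not the real compiler::SourcePositionTable API):

    #include <cstdint>
    #include <unordered_map>

    struct Node;  // opaque IR node

    class SourcePositionSketch {
     public:
      static constexpr uint32_t kNoPosition = 0xffffffffu;
      void Set(const Node* node, uint32_t byte_offset) {
        table_[node] = byte_offset;
      }
      uint32_t Get(const Node* node) const {
        auto it = table_.find(node);
        return it == table_.end() ? kNoPosition : it->second;
      }
     private:
      std::unordered_map<const Node*, uint32_t> table_;
    };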
 
-Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args,
+                                   wasm::WasmCodePosition position) {
   DCHECK_NULL(args[0]);
 
   // Add code object as constant.
-  args[0] = Constant(module_->GetFunctionCode(index));
+  args[0] = HeapConstant(module_->GetFunctionCode(index));
   wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
 
-  return BuildWasmCall(sig, args);
+  return BuildWasmCall(sig, args, position);
 }
 
-Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args,
+                                   wasm::WasmCodePosition position) {
   DCHECK_NULL(args[0]);
 
   // Add code object as constant.
-  args[0] = Constant(module_->GetImportCode(index));
+  args[0] = HeapConstant(module_->GetImportCode(index));
   wasm::FunctionSig* sig = module_->GetImportSignature(index);
 
-  return BuildWasmCall(sig, args);
+  return BuildWasmCall(sig, args, position);
 }
 
-Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
+                                     wasm::WasmCodePosition position) {
   DCHECK_NOT_NULL(args[0]);
   DCHECK(module_ && module_->instance);
 
@@ -2033,10 +1929,10 @@
     // Bounds check against the table size.
     Node* size = Int32Constant(static_cast<int>(table_size));
     Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
-    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds);
+    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
   } else {
     // No function table. Generate a trap and return a constant.
-    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0));
+    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
     return trap_->GetTrapValue(module_->GetSignature(index));
   }
   Node* table = FunctionTable();
@@ -2056,7 +1952,7 @@
         *effect_, *control_);
     Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
                                        jsgraph()->SmiConstant(index));
-    trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match);
+    trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
   }
 
   // Load code object from the table.
@@ -2071,77 +1967,7 @@
 
   args[0] = load_code;
   wasm::FunctionSig* sig = module_->GetSignature(index);
-  return BuildWasmCall(sig, args);
-}
-
-
-Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
-  SimplifiedOperatorBuilder simplified(jsgraph()->zone());
-  switch (type) {
-    case wasm::kAstI32:
-      return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
-    case wasm::kAstI64:
-      // TODO(titzer): i64->JS has no good solution right now. Using lower 32
-      // bits.
-      node =
-          graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), node);
-      return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
-    case wasm::kAstF32:
-      node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
-                              node);
-      return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
-    case wasm::kAstF64:
-      return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
-    case wasm::kAstStmt:
-      return jsgraph()->UndefinedConstant();
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-
-Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
-                               wasm::LocalType type) {
-  // Do a JavaScript ToNumber.
-  Node* num =
-      graph()->NewNode(jsgraph()->javascript()->ToNumber(), node, context,
-                       jsgraph()->EmptyFrameState(), *effect_, *control_);
-  *control_ = num;
-  *effect_ = num;
-
-  // Change representation.
-  SimplifiedOperatorBuilder simplified(jsgraph()->zone());
-  num = graph()->NewNode(simplified.ChangeTaggedToFloat64(), num);
-
-  switch (type) {
-    case wasm::kAstI32: {
-      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
-                                 TruncationMode::kJavaScript),
-                             num);
-      break;
-    }
-    case wasm::kAstI64:
-      // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
-      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
-                                 TruncationMode::kJavaScript),
-                             num);
-      num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
-      break;
-    case wasm::kAstF32:
-      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
-                             num);
-      break;
-    case wasm::kAstF64:
-      break;
-    case wasm::kAstStmt:
-      num = jsgraph()->Int32Constant(0);
-      break;
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-  return num;
+  return BuildWasmCall(sig, args, position);
 }
 
 Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -2174,31 +2000,404 @@
   return Unop(wasm::kExprI32Eqz, node);
 }
 
+Node* WasmGraphBuilder::BuildChangeInt32ToTagged(Node* value) {
+  MachineOperatorBuilder* machine = jsgraph()->machine();
+  CommonOperatorBuilder* common = jsgraph()->common();
+
+  if (machine->Is64()) {
+    return BuildChangeInt32ToSmi(value);
+  }
+
+  Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value);
+
+  Node* ovf = graph()->NewNode(common->Projection(1), add);
+  Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), ovf,
+                                  graph()->start());
+
+  Node* if_true = graph()->NewNode(common->IfTrue(), branch);
+  Node* vtrue = BuildAllocateHeapNumberWithValue(
+      graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
+
+  Node* if_false = graph()->NewNode(common->IfFalse(), branch);
+  Node* vfalse = graph()->NewNode(common->Projection(0), add);
+
+  Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
+                               vtrue, vfalse, merge);
+  return phi;
+}
+
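
For reference, the smi-tagging trick above amounts to the following scalar logic. This is a minimal standalone sketch, not V8 code; it assumes V8's 32-bit layout with a one-bit smi tag of 0, so tagging is a doubling (value + value) whose overflow signals that a HeapNumber box is required. __builtin_add_overflow is a GCC/Clang intrinsic.

#include <cstdint>
#include <optional>

// Sketch of BuildChangeInt32ToTagged on a 32-bit target: tag == value * 2,
// with the low bit 0 marking a smi. Overflow means the value needs boxing.
std::optional<int32_t> TrySmiTag32(int32_t value) {
  int32_t tagged;
  if (__builtin_add_overflow(value, value, &tagged)) {
    return std::nullopt;  // Caller allocates a HeapNumber instead.
  }
  return tagged;
}
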
+Node* WasmGraphBuilder::BuildChangeFloat64ToTagged(Node* value) {
+  MachineOperatorBuilder* machine = jsgraph()->machine();
+  CommonOperatorBuilder* common = jsgraph()->common();
+
+  Node* value32 = graph()->NewNode(machine->RoundFloat64ToInt32(), value);
+  Node* check_same = graph()->NewNode(
+      machine->Float64Equal(), value,
+      graph()->NewNode(machine->ChangeInt32ToFloat64(), value32));
+  Node* branch_same =
+      graph()->NewNode(common->Branch(), check_same, graph()->start());
+
+  Node* if_smi = graph()->NewNode(common->IfTrue(), branch_same);
+  Node* vsmi;
+  Node* if_box = graph()->NewNode(common->IfFalse(), branch_same);
+  Node* vbox;
+
+  // We only need to check for -0 if the {value} can potentially contain -0.
+  Node* check_zero = graph()->NewNode(machine->Word32Equal(), value32,
+                                      jsgraph()->Int32Constant(0));
+  Node* branch_zero =
+      graph()->NewNode(common->Branch(BranchHint::kFalse), check_zero, if_smi);
+
+  Node* if_zero = graph()->NewNode(common->IfTrue(), branch_zero);
+  Node* if_notzero = graph()->NewNode(common->IfFalse(), branch_zero);
+
+  // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+  Node* check_negative = graph()->NewNode(
+      machine->Int32LessThan(),
+      graph()->NewNode(machine->Float64ExtractHighWord32(), value),
+      jsgraph()->Int32Constant(0));
+  Node* branch_negative = graph()->NewNode(common->Branch(BranchHint::kFalse),
+                                           check_negative, if_zero);
+
+  Node* if_negative = graph()->NewNode(common->IfTrue(), branch_negative);
+  Node* if_notnegative = graph()->NewNode(common->IfFalse(), branch_negative);
+
+  // We need to create a box for negative 0.
+  if_smi = graph()->NewNode(common->Merge(2), if_notzero, if_notnegative);
+  if_box = graph()->NewNode(common->Merge(2), if_box, if_negative);
+
+  // On 64-bit machines we can just wrap the 32-bit integer in a smi; on
+  // 32-bit machines we need to deal with potential overflow and fall back
+  // to boxing.
+  if (machine->Is64()) {
+    vsmi = BuildChangeInt32ToSmi(value32);
+  } else {
+    Node* smi_tag =
+        graph()->NewNode(machine->Int32AddWithOverflow(), value32, value32);
+
+    Node* check_ovf = graph()->NewNode(common->Projection(1), smi_tag);
+    Node* branch_ovf =
+        graph()->NewNode(common->Branch(BranchHint::kFalse), check_ovf, if_smi);
+
+    Node* if_ovf = graph()->NewNode(common->IfTrue(), branch_ovf);
+    if_box = graph()->NewNode(common->Merge(2), if_ovf, if_box);
+
+    if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
+    vsmi = graph()->NewNode(common->Projection(0), smi_tag);
+  }
+
+  // Allocate the box for the {value}.
+  vbox = BuildAllocateHeapNumberWithValue(value, if_box);
+
+  Node* control = graph()->NewNode(common->Merge(2), if_smi, if_box);
+  value = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2), vsmi,
+                           vbox, control);
+  return value;
+}
+
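
The float64-to-tagged path above reduces to this predicate. A minimal sketch (not V8 code), assuming IEEE-754 doubles and 64-bit smis (the narrower 31-bit smi range on 32-bit targets is handled by the overflow check in the graph). The -0 detection mirrors the Float64ExtractHighWord32 check: -0 and +0 compare equal but differ in the sign bit of the high word.

#include <cstdint>
#include <cstring>

// True iff |value| can be represented as a smi rather than a HeapNumber:
// it must round-trip through int32 and must not be IEEE -0.
bool IsSmiRepresentable(double value) {
  // Guard the cast: converting an out-of-range double to int32_t is UB.
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
  int32_t value32 = static_cast<int32_t>(value);
  if (static_cast<double>(value32) != value) return false;
  if (value32 == 0) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    // Sign bit set in the high word means the IEEE -0 pattern.
    if (static_cast<int32_t>(bits >> 32) < 0) return false;
  }
  return true;
}
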
+Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
+  switch (type) {
+    case wasm::kAstI32:
+      return BuildChangeInt32ToTagged(node);
+    case wasm::kAstI64:
+      // TODO(titzer): i64->JS has no good solution right now. Using lower 32
+      // bits.
+      if (jsgraph()->machine()->Is64()) {
+        // On 32-bit platforms we do not have to do the truncation because
+        // the node we get in as a parameter already contains only the low
+        // word.
+        node = graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(),
+                                node);
+      }
+      return BuildChangeInt32ToTagged(node);
+    case wasm::kAstF32:
+      node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
+                              node);
+      return BuildChangeFloat64ToTagged(node);
+    case wasm::kAstF64:
+      return BuildChangeFloat64ToTagged(node);
+    case wasm::kAstStmt:
+      return jsgraph()->UndefinedConstant();
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+}
+
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
+                                                Node* effect, Node* control) {
+  Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+      CallDescriptor::kNoFlags, Operator::kNoProperties);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+
+  Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
+                                  node, context, effect, control);
+
+  *control_ = result;
+  *effect_ = result;
+
+  return result;
+}
+
+bool CanCover(Node* value, IrOpcode::Value opcode) {
+  if (value->opcode() != opcode) return false;
+  bool first = true;
+  for (Edge const edge : value->use_edges()) {
+    if (NodeProperties::IsControlEdge(edge)) continue;
+    if (NodeProperties::IsEffectEdge(edge)) continue;
+    DCHECK(NodeProperties::IsValueEdge(edge));
+    if (!first) return false;
+    first = false;
+  }
+  return true;
+}
+
+Node* WasmGraphBuilder::BuildChangeTaggedToFloat64(Node* value) {
+  MachineOperatorBuilder* machine = jsgraph()->machine();
+  CommonOperatorBuilder* common = jsgraph()->common();
+
+  if (CanCover(value, IrOpcode::kJSToNumber)) {
+    // ChangeTaggedToFloat64(JSToNumber(x)) =>
+    //   if IsSmi(x) then ChangeSmiToFloat64(x)
+    //   else let y = JSToNumber(x) in
+    //     if IsSmi(y) then ChangeSmiToFloat64(y)
+    //     else BuildLoadHeapNumberValue(y)
+    Node* object = NodeProperties::GetValueInput(value, 0);
+    Node* context = NodeProperties::GetContextInput(value);
+    Node* frame_state = NodeProperties::GetFrameStateInput(value, 0);
+    Node* effect = NodeProperties::GetEffectInput(value);
+    Node* control = NodeProperties::GetControlInput(value);
+
+    const Operator* merge_op = common->Merge(2);
+    const Operator* ephi_op = common->EffectPhi(2);
+    const Operator* phi_op = common->Phi(MachineRepresentation::kFloat64, 2);
+
+    Node* check1 = BuildTestNotSmi(object);
+    Node* branch1 =
+        graph()->NewNode(common->Branch(BranchHint::kFalse), check1, control);
+
+    Node* if_true1 = graph()->NewNode(common->IfTrue(), branch1);
+    Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
+                                    effect, if_true1);
+    Node* etrue1 = vtrue1;
+
+    Node* check2 = BuildTestNotSmi(vtrue1);
+    Node* branch2 = graph()->NewNode(common->Branch(), check2, if_true1);
+
+    Node* if_true2 = graph()->NewNode(common->IfTrue(), branch2);
+    Node* vtrue2 = BuildLoadHeapNumberValue(vtrue1, if_true2);
+
+    Node* if_false2 = graph()->NewNode(common->IfFalse(), branch2);
+    Node* vfalse2 = BuildChangeSmiToFloat64(vtrue1);
+
+    if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
+    vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
+
+    Node* if_false1 = graph()->NewNode(common->IfFalse(), branch1);
+    Node* vfalse1 = BuildChangeSmiToFloat64(object);
+    Node* efalse1 = effect;
+
+    Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
+    Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
+    Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
+
+    // Wire the new diamond into the graph; {JSToNumber} can still throw.
+    NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
+
+    // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
+    // the node and places it inside the diamond. Come up with a helper method!
+    for (Node* use : etrue1->uses()) {
+      if (use->opcode() == IrOpcode::kIfSuccess) {
+        use->ReplaceUses(merge1);
+        NodeProperties::ReplaceControlInput(branch2, use);
+      }
+    }
+    return phi1;
+  }
+
+  Node* check = BuildTestNotSmi(value);
+  Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), check,
+                                  graph()->start());
+
+  Node* if_not_smi = graph()->NewNode(common->IfTrue(), branch);
+
+  Node* vnot_smi;
+  Node* check_undefined = graph()->NewNode(machine->WordEqual(), value,
+                                           jsgraph()->UndefinedConstant());
+  Node* branch_undefined = graph()->NewNode(common->Branch(BranchHint::kFalse),
+                                            check_undefined, if_not_smi);
+
+  Node* if_undefined = graph()->NewNode(common->IfTrue(), branch_undefined);
+  Node* vundefined =
+      jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
+
+  Node* if_not_undefined =
+      graph()->NewNode(common->IfFalse(), branch_undefined);
+  Node* vheap_number = BuildLoadHeapNumberValue(value, if_not_undefined);
+
+  if_not_smi =
+      graph()->NewNode(common->Merge(2), if_undefined, if_not_undefined);
+  vnot_smi = graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
+                              vundefined, vheap_number, if_not_smi);
+
+  Node* if_smi = graph()->NewNode(common->IfFalse(), branch);
+  Node* vfrom_smi = BuildChangeSmiToFloat64(value);
+
+  Node* merge = graph()->NewNode(common->Merge(2), if_not_smi, if_smi);
+  Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
+                               vnot_smi, vfrom_smi, merge);
+
+  return phi;
+}
+
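
Stripped of the graph plumbing, the untagging diamond above computes the following. A scalar sketch with a hypothetical TaggedValue stand-in for a real tagged pointer; the undefined-to-NaN arm matches the WordEqual check against the undefined constant.

#include <cstdint>
#include <limits>

// Hypothetical stand-in for a tagged pointer, for illustration only.
struct TaggedValue {
  bool is_smi;
  bool is_undefined;
  int32_t smi_value;   // Valid when is_smi.
  double heap_number;  // Valid otherwise (assumes a HeapNumber).
};

double ChangeTaggedToFloat64(const TaggedValue& v) {
  if (v.is_smi) return static_cast<double>(v.smi_value);  // smi arm
  if (v.is_undefined) {
    return std::numeric_limits<double>::quiet_NaN();      // undefined arm
  }
  return v.heap_number;  // BuildLoadHeapNumberValue in the graph.
}
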
+Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
+                               wasm::LocalType type) {
+  // Do a JavaScript ToNumber.
+  Node* num = BuildJavaScriptToNumber(node, context, *effect_, *control_);
+
+  // Change representation.
+  num = BuildChangeTaggedToFloat64(num);
+
+  switch (type) {
+    case wasm::kAstI32: {
+      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
+                             num);
+      break;
+    }
+    case wasm::kAstI64:
+      // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
+      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
+                             num);
+      if (jsgraph()->machine()->Is64()) {
+        // We cannot change an int32 to an int64 on a 32-bit platform.
+        // Instead we split the parameter node later.
+        num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
+      }
+      break;
+    case wasm::kAstF32:
+      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
+                             num);
+      break;
+    case wasm::kAstF64:
+      break;
+    case wasm::kAstStmt:
+      num = jsgraph()->Int32Constant(0);
+      break;
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+  return num;
+}
+
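
The TruncateFloat64ToWord32 used above follows JavaScript ToInt32 semantics rather than a trapping conversion. A scalar sketch of that truncation (standard C++, not V8 code): NaN and infinities map to 0, everything else is truncated toward zero and reduced modulo 2^32.

#include <cmath>
#include <cstdint>

// ECMAScript-style ToInt32: truncate toward zero, wrap modulo 2^32,
// then reinterpret the result as signed.
int32_t JSTruncateToInt32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  double t = std::trunc(d);
  double m = std::fmod(t, 4294967296.0);  // 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
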
+Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+  if (jsgraph()->machine()->Is64()) {
+    value = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), value);
+  }
+  return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
+                          BuildSmiShiftBitsConstant());
+}
+
+Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
+  value = graph()->NewNode(jsgraph()->machine()->WordSar(), value,
+                           BuildSmiShiftBitsConstant());
+  if (jsgraph()->machine()->Is64()) {
+    value =
+        graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), value);
+  }
+  return value;
+}
+
+Node* WasmGraphBuilder::BuildChangeSmiToFloat64(Node* value) {
+  return graph()->NewNode(jsgraph()->machine()->ChangeInt32ToFloat64(),
+                          BuildChangeSmiToInt32(value));
+}
+
+Node* WasmGraphBuilder::BuildTestNotSmi(Node* value) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+  return graph()->NewNode(jsgraph()->machine()->WordAnd(), value,
+                          jsgraph()->IntPtrConstant(kSmiTagMask));
+}
+
+Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
+  return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
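
The tag/untag helpers above are inverses built from one shift amount. A sketch assuming the common 64-bit layout (kSmiTagSize == 1, kSmiShiftSize == 31, so the 32-bit payload lives in the upper half of the word and intptr_t is 64 bits wide); on 32-bit targets the shift is just 1.

#include <cstdint>

constexpr int kSmiShift = 31 + 1;  // kSmiShiftSize + kSmiTagSize

intptr_t SmiTag(int32_t value) {
  // Shift through unsigned to avoid UB on negative values pre-C++20.
  uintptr_t widened = static_cast<uintptr_t>(static_cast<intptr_t>(value));
  return static_cast<intptr_t>(widened << kSmiShift);  // WordShl
}

int32_t SmiUntag(intptr_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);  // WordSar + truncate
}
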
+Node* WasmGraphBuilder::BuildAllocateHeapNumberWithValue(Node* value,
+                                                         Node* control) {
+  MachineOperatorBuilder* machine = jsgraph()->machine();
+  CommonOperatorBuilder* common = jsgraph()->common();
+  // The AllocateHeapNumberStub does not use the context, so we can safely pass
+  // in Smi zero here.
+  Callable callable = CodeFactory::AllocateHeapNumber(jsgraph()->isolate());
+  Node* target = jsgraph()->HeapConstant(callable.code());
+  Node* context = jsgraph()->NoContextConstant();
+  Node* effect = graph()->NewNode(common->BeginRegion(), graph()->start());
+  if (!allocate_heap_number_operator_.is_set()) {
+    CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+        jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+        CallDescriptor::kNoFlags, Operator::kNoThrow);
+    allocate_heap_number_operator_.set(common->Call(descriptor));
+  }
+  Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
+                                       target, context, effect, control);
+  Node* store =
+      graph()->NewNode(machine->Store(StoreRepresentation(
+                           MachineRepresentation::kFloat64, kNoWriteBarrier)),
+                       heap_number, BuildHeapNumberValueIndexConstant(), value,
+                       heap_number, control);
+  return graph()->NewNode(common->FinishRegion(), heap_number, store);
+}
+
+Node* WasmGraphBuilder::BuildLoadHeapNumberValue(Node* value, Node* control) {
+  return graph()->NewNode(jsgraph()->machine()->Load(MachineType::Float64()),
+                          value, BuildHeapNumberValueIndexConstant(),
+                          graph()->start(), control);
+}
+
+Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
+  return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
+}
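
The offset constant above is relative to a tagged pointer. A sketch assuming V8's kHeapObjectTag == 1 (heap pointers carry the tag in their low bit, so the untagged field address is pointer + offset - 1); the raw-address access here is purely illustrative.

#include <cstdint>
#include <cstring>

// Load the float64 payload of a heap number given its tagged address.
double LoadHeapNumberValue(uintptr_t tagged_ptr, int value_offset) {
  uintptr_t field = tagged_ptr + value_offset - 1;  // strip kHeapObjectTag
  double result;
  std::memcpy(&result, reinterpret_cast<const void*>(field), sizeof result);
  return result;
}
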
 
 void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
                                             wasm::FunctionSig* sig) {
-  int params = static_cast<int>(sig->parameter_count());
-  int count = params + 3;
+  int wasm_count = static_cast<int>(sig->parameter_count());
+  int param_count;
+  if (jsgraph()->machine()->Is64()) {
+    param_count = static_cast<int>(sig->parameter_count());
+  } else {
+    param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
+  }
+  int count = param_count + 3;
   Node** args = Buffer(count);
 
   // Build the start and the JS parameter nodes.
-  Node* start = Start(params + 5);
+  Node* start = Start(param_count + 5);
   *control_ = start;
   *effect_ = start;
   // Create the context parameter
   Node* context = graph()->NewNode(
       jsgraph()->common()->Parameter(
-          Linkage::GetJSCallContextParamIndex(params + 1), "%context"),
+          Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
       graph()->start());
 
   int pos = 0;
-  args[pos++] = Constant(wasm_code);
+  args[pos++] = HeapConstant(wasm_code);
 
   // Convert JS parameters to WASM numbers.
-  for (int i = 0; i < params; i++) {
+  for (int i = 0; i < wasm_count; i++) {
     Node* param =
         graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
-    args[pos++] = FromJS(param, context, sig->GetParam(i));
+    Node* wasm_param = FromJS(param, context, sig->GetParam(i));
+    args[pos++] = wasm_param;
+    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+      // We derive the high word with SAR (arithmetic shift right) to get
+      // the proper sign extension.
+      args[pos++] = graph()->NewNode(jsgraph()->machine()->Word32Sar(),
+                                     wasm_param, jsgraph()->Int32Constant(31));
+    }
   }
 
   args[pos++] = *effect_;
@@ -2207,9 +2406,18 @@
   // Call the WASM code.
   CallDescriptor* desc =
       wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+  if (jsgraph()->machine()->Is32()) {
+    desc = wasm::ModuleEnv::GetI32WasmCallDescriptor(jsgraph()->zone(), desc);
+  }
   Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+  Node* retval = call;
+  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+      sig->GetReturn(0) == wasm::kAstI64) {
+    // The return value comes as two words; we pick the low word.
+    retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval);
+  }
   Node* jsval =
-      ToJS(call, context,
+      ToJS(retval, context,
            sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
   Node* ret =
       graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
@@ -2217,20 +2425,25 @@
   MergeControlToEnd(jsgraph(), ret);
 }
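
On 32-bit targets an i64 argument occupies two parameter slots, and since FromJS only produces the low 32 bits here, the high word is pure sign extension, which is what the Word32Sar(value, 31) in the wrapper computes. A scalar sketch (illustrative only; the arithmetic right shift of a negative int is implementation-defined pre-C++20 but arithmetic on all relevant targets):

#include <cstdint>
#include <utility>

// Given the low word of an i64 produced by JS-number truncation, derive
// the {low, high} pair passed as two i32 parameters.
std::pair<int32_t, int32_t> SplitSignExtended(int32_t low) {
  int32_t high = low >> 31;  // arithmetic shift: 0 or -1
  return {low, high};
}
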
 
-
 void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
                                             wasm::FunctionSig* sig) {
   int js_count = function->shared()->internal_formal_parameter_count();
   int wasm_count = static_cast<int>(sig->parameter_count());
+  int param_count;
+  if (jsgraph()->machine()->Is64()) {
+    param_count = wasm_count;
+  } else {
+    param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
+  }
 
   // Build the start and the parameter nodes.
   Isolate* isolate = jsgraph()->isolate();
   CallDescriptor* desc;
-  Node* start = Start(wasm_count + 3);
+  Node* start = Start(param_count + 3);
   *effect_ = start;
   *control_ = start;
   // JS context is the last parameter.
-  Node* context = Constant(Handle<Context>(function->context(), isolate));
+  Node* context = HeapConstant(Handle<Context>(function->context(), isolate));
   Node** args = Buffer(wasm_count + 7);
 
   bool arg_count_before_args = false;
@@ -2262,9 +2475,15 @@
   args[pos++] = jsgraph()->Constant(global);
 
   // Convert WASM numbers to JS values.
+  int param_index = 0;
   for (int i = 0; i < wasm_count; i++) {
-    Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+    Node* param =
+        graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
     args[pos++] = ToJS(param, context, sig->GetParam(i));
+    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+      // On 32-bit platforms we have to skip the high word of int64 parameters.
+      param_index++;
+    }
   }
 
   if (add_new_target_undefined) {
@@ -2281,30 +2500,39 @@
   Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
 
   // Convert the return value back.
+  Node* ret;
   Node* val =
       FromJS(call, context,
              sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
-  Node* ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+      sig->GetReturn() == wasm::kAstI64) {
+    ret = graph()->NewNode(jsgraph()->common()->Return(), val,
+                           graph()->NewNode(jsgraph()->machine()->Word32Sar(),
+                                            val, jsgraph()->Int32Constant(31)),
+                           call, start);
+  } else {
+    ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+  }
 
   MergeControlToEnd(jsgraph(), ret);
 }
 
-
 Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
   DCHECK(module_ && module_->instance);
   if (offset == 0) {
     if (!mem_buffer_) {
-      mem_buffer_ = jsgraph()->IntPtrConstant(
-          reinterpret_cast<uintptr_t>(module_->instance->mem_start));
+      mem_buffer_ = jsgraph()->RelocatableIntPtrConstant(
+          reinterpret_cast<uintptr_t>(module_->instance->mem_start),
+          RelocInfo::WASM_MEMORY_REFERENCE);
     }
     return mem_buffer_;
   } else {
-    return jsgraph()->IntPtrConstant(
-        reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset));
+    return jsgraph()->RelocatableIntPtrConstant(
+        reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset),
+        RelocInfo::WASM_MEMORY_REFERENCE);
   }
 }
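
Switching the memory base from a plain IntPtrConstant to a RelocatableIntPtrConstant records the embedded address in the relocation info, so generated code can be patched if the backing store moves (e.g. when memory grows). A toy model of that patching with hypothetical types, not V8's RelocInfo:

#include <cstdint>
#include <cstring>
#include <vector>

struct RelocEntry {
  uint8_t* patch_site;  // where the old base address is embedded in code
};

// Rewrite every recorded immediate to point at the relocated base.
void PatchMemoryBase(std::vector<RelocEntry>& sites, uintptr_t new_base) {
  for (RelocEntry& e : sites) {
    std::memcpy(e.patch_site, &new_base, sizeof new_base);
  }
}
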
 
-
 Node* WasmGraphBuilder::MemSize(uint32_t offset) {
   DCHECK(module_ && module_->instance);
   uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
@@ -2316,17 +2544,15 @@
   }
 }
 
-
 Node* WasmGraphBuilder::FunctionTable() {
   DCHECK(module_ && module_->instance &&
          !module_->instance->function_table.is_null());
   if (!function_table_) {
-    function_table_ = jsgraph()->Constant(module_->instance->function_table);
+    function_table_ = HeapConstant(module_->instance->function_table);
   }
   return function_table_;
 }
 
-
 Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
   DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
@@ -2340,7 +2566,6 @@
   return node;
 }
 
-
 Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
   DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
@@ -2355,46 +2580,48 @@
   return node;
 }
 
-
 void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
-                                      uint32_t offset) {
-  // TODO(turbofan): fold bounds checks for constant indexes.
+                                      uint32_t offset,
+                                      wasm::WasmCodePosition position) {
   DCHECK(module_ && module_->instance);
   size_t size = module_->instance->mem_size;
   byte memsize = wasm::WasmOpcodes::MemSize(memtype);
-  Node* cond;
+
   if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
-    // The access will always throw.
-    cond = jsgraph()->Int32Constant(0);
-  } else {
-    // Check against the limit.
-    size_t limit = size - offset - memsize;
-    CHECK(limit <= kMaxUInt32);
-    cond = graph()->NewNode(
-        jsgraph()->machine()->Uint32LessThanOrEqual(), index,
-        jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
+    // The access will always throw (unless memory is grown).
+    Node* cond = jsgraph()->Int32Constant(0);
+    trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+    return;
   }
 
-  trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond);
+  // Check against the effective size.
+  size_t effective_size = size - offset - memsize;
+  CHECK(effective_size <= kMaxUInt32);
+
+  Uint32Matcher m(index);
+  if (m.HasValue()) {
+    uint32_t value = m.Value();
+    if (value <= effective_size) {
+      // The bounds check will always succeed.
+      return;
+    }
+  }
+
+  Node* cond = graph()->NewNode(
+      jsgraph()->machine()->Uint32LessThanOrEqual(), index,
+      jsgraph()->Int32Constant(static_cast<uint32_t>(effective_size)));
+
+  trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
 }
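
The check above, including the new constant-index folding via Uint32Matcher, reduces to this scalar predicate (a sketch, not V8 code). When the index is a compile-time constant that satisfies the predicate, the graph omits the runtime check entirely.

#include <cstdint>

// An access of |memsize| bytes at index + offset is in bounds iff
// index <= mem_size - offset - memsize, guarding the subtraction first.
bool AccessInBounds(uint64_t mem_size, uint32_t offset, uint32_t memsize,
                    uint32_t index) {
  if (offset >= mem_size ||
      static_cast<uint64_t>(offset) + memsize > mem_size) {
    return false;  // Every index traps (the Int32Constant(0) path).
  }
  uint64_t effective_size = mem_size - offset - memsize;
  return index <= effective_size;  // Uint32LessThanOrEqual in the graph.
}
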
 
-
 Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
-                                Node* index, uint32_t offset) {
+                                Node* index, uint32_t offset,
+                                wasm::WasmCodePosition position) {
   Node* load;
-
-  if (module_ && module_->asm_js()) {
-    // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
-    DCHECK_EQ(0, offset);
-    const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
-    load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
-                            *control_);
-  } else {
-    // WASM semantics throw on OOB. Introduce explicit bounds check.
-    BoundsCheckMem(memtype, index, offset);
-    load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
-                            MemBuffer(offset), index, *effect_, *control_);
-  }
+  // WASM semantics throw on OOB. Introduce explicit bounds check.
+  BoundsCheckMem(memtype, index, offset, position);
+  load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+                          MemBuffer(offset), index, *effect_, *control_);
 
   *effect_ = load;
 
@@ -2414,41 +2641,50 @@
   return load;
 }
 
-
 Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
-                                 uint32_t offset, Node* val) {
+                                 uint32_t offset, Node* val,
+                                 wasm::WasmCodePosition position) {
   Node* store;
-  if (module_ && module_->asm_js()) {
-    // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
-    DCHECK_EQ(0, offset);
-    const Operator* op =
-        jsgraph()->machine()->CheckedStore(memtype.representation());
-    store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val, *effect_,
-                             *control_);
-  } else {
-    // WASM semantics throw on OOB. Introduce explicit bounds check.
-    BoundsCheckMem(memtype, index, offset);
-    StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
-    store =
-        graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
-                         index, val, *effect_, *control_);
-  }
+  // WASM semantics throw on OOB. Introduce explicit bounds check.
+  BoundsCheckMem(memtype, index, offset, position);
+  StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+  store = graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+                           index, val, *effect_, *control_);
   *effect_ = store;
   return store;
 }
 
+Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
+  // TODO(turbofan): fold bounds checks for constant asm.js loads.
+  // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
+  const Operator* op = jsgraph()->machine()->CheckedLoad(type);
+  Node* load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
+                                *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
+                                           Node* val) {
+  // TODO(turbofan): fold bounds checks for constant asm.js stores.
+  // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
+  const Operator* op =
+      jsgraph()->machine()->CheckedStore(type.representation());
+  Node* store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val,
+                                 *effect_, *control_);
+  *effect_ = store;
+  return val;
+}
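
The split above separates the two memory models: WASM loads and stores trap on out-of-bounds accesses (explicit BoundsCheckMem), while asm.js uses CheckedLoad/CheckedStore, where OOB reads produce a zero-ish value and OOB writes are dropped. A scalar sketch of the asm.js side (illustrative only; "zero-ish" is NaN for float types):

#include <cstddef>
#include <cstdint>
#include <vector>

// asm.js semantics: OOB reads return 0.
uint8_t AsmjsLoad(const std::vector<uint8_t>& mem, std::size_t index) {
  return index < mem.size() ? mem[index] : 0;
}

// asm.js semantics: OOB writes are silently ignored.
void AsmjsStore(std::vector<uint8_t>& mem, std::size_t index, uint8_t value) {
  if (index < mem.size()) mem[index] = value;
}
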
 
 void WasmGraphBuilder::PrintDebugName(Node* node) {
   PrintF("#%d:%s", node->id(), node->op()->mnemonic());
 }
 
-
 Node* WasmGraphBuilder::String(const char* string) {
   return jsgraph()->Constant(
       jsgraph()->isolate()->factory()->NewStringFromAsciiChecked(string));
 }
 
-
 Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
 
 void WasmGraphBuilder::Int64LoweringForTesting() {
@@ -2460,6 +2696,14 @@
   }
 }
 
+void WasmGraphBuilder::SetSourcePosition(Node* node,
+                                         wasm::WasmCodePosition position) {
+  DCHECK_NE(position, wasm::kNoCodePosition);
+  compiler::SourcePosition pos(position);
+  if (source_position_table_)
+    source_position_table_->SetSourcePosition(node, pos);
+}
+
 static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
                                       CompilationInfo* info,
                                       const char* message, uint32_t index,
@@ -2468,8 +2712,8 @@
   if (isolate->logger()->is_logging_code_events() ||
       isolate->cpu_profiler()->is_profiling()) {
     ScopedVector<char> buffer(128);
-    SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length,
-             func_name.name);
+    SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length(),
+             func_name.start());
     Handle<String> name_str =
         isolate->factory()->NewStringFromAsciiChecked(buffer.start());
     Handle<String> script_str =
@@ -2478,7 +2722,7 @@
     Handle<SharedFunctionInfo> shared =
         isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
     PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
-                                     info, *script_str, 0, 0));
+                                     *script_str, 0, 0));
   }
 }
 
@@ -2506,9 +2750,8 @@
   Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
-  JSOperatorBuilder javascript(&zone);
   MachineOperatorBuilder machine(&zone);
-  JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
 
   Node* control = nullptr;
   Node* effect = nullptr;
@@ -2523,20 +2766,6 @@
   // Run the compilation pipeline.
   //----------------------------------------------------------------------------
   {
-    // Changes lowering requires types.
-    Typer typer(isolate, &graph);
-    NodeVector roots(&zone);
-    jsgraph.GetCachedNodes(&roots);
-    typer.Run(roots);
-
-    // Run generic and change lowering.
-    JSGenericLowering generic(true, &jsgraph);
-    ChangeLowering changes(&jsgraph);
-    GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
-    graph_reducer.AddReducer(&changes);
-    graph_reducer.AddReducer(&generic);
-    graph_reducer.ReduceGraph();
-
     if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
       OFStream os(stdout);
       os << "-- Graph after change lowering -- " << std::endl;
@@ -2555,19 +2784,19 @@
 #else
         FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
 #endif
-    const char* func_name = "js-to-wasm";
+    Vector<const char> func_name = ArrayVector("js-to-wasm");
 
     static unsigned id = 0;
     Vector<char> buffer;
     if (debugging) {
       buffer = Vector<char>::New(128);
-      SNPrintF(buffer, "js-to-wasm#%d", id);
-      func_name = buffer.start();
+      int chars = SNPrintF(buffer, "js-to-wasm#%d", id);
+      func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
     }
 
     CompilationInfo info(func_name, isolate, &zone, flags);
     Handle<Code> code =
-        Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+        Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_opt_code && !code.is_null()) {
       OFStream os(stdout);
@@ -2598,9 +2827,8 @@
   Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
-  JSOperatorBuilder javascript(&zone);
   MachineOperatorBuilder machine(&zone);
-  JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
 
   Node* control = nullptr;
   Node* effect = nullptr;
@@ -2613,20 +2841,6 @@
 
   Handle<Code> code = Handle<Code>::null();
   {
-    // Changes lowering requires types.
-    Typer typer(isolate, &graph);
-    NodeVector roots(&zone);
-    jsgraph.GetCachedNodes(&roots);
-    typer.Run(roots);
-
-    // Run generic and change lowering.
-    JSGenericLowering generic(true, &jsgraph);
-    ChangeLowering changes(&jsgraph);
-    GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
-    graph_reducer.AddReducer(&changes);
-    graph_reducer.AddReducer(&generic);
-    graph_reducer.ReduceGraph();
-
     if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
       OFStream os(stdout);
       os << "-- Graph after change lowering -- " << std::endl;
@@ -2636,6 +2850,9 @@
     // Schedule and compile to machine code.
     CallDescriptor* incoming =
         wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+    if (machine.Is32()) {
+      incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+    }
     Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
     bool debugging =
 #if DEBUG
@@ -2643,13 +2860,13 @@
 #else
         FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
 #endif
-    const char* func_name = "wasm-to-js";
+    Vector<const char> func_name = ArrayVector("wasm-to-js");
     static unsigned id = 0;
     Vector<char> buffer;
     if (debugging) {
       buffer = Vector<char>::New(128);
-      SNPrintF(buffer, "wasm-to-js#%d", id);
-      func_name = buffer.start();
+      int chars = SNPrintF(buffer, "wasm-to-js#%d", id);
+      func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
     }
 
     CompilationInfo info(func_name, isolate, &zone, flags);
@@ -2670,40 +2887,34 @@
   return code;
 }
 
-
-// Helper function to compile a single function.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
-                                 wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction& function) {
-  if (FLAG_trace_wasm_compiler) {
-    OFStream os(stdout);
-    os << "Compiling WASM function "
-       << wasm::WasmFunctionName(&function, module_env) << std::endl;
-    os << std::endl;
-  }
-
-  double decode_ms = 0;
+std::pair<JSGraph*, SourcePositionTable*> BuildGraphForWasmFunction(
+    JSGraph* jsgraph, wasm::ErrorThrower* thrower, Isolate* isolate,
+    wasm::ModuleEnv*& module_env, const wasm::WasmFunction* function,
+    double* decode_ms) {
   base::ElapsedTimer decode_timer;
   if (FLAG_trace_wasm_decode_time) {
     decode_timer.Start();
   }
-
   // Create a TF graph during decoding.
-  Zone zone(isolate->allocator());
-  Graph graph(&zone);
-  CommonOperatorBuilder common(&zone);
-  MachineOperatorBuilder machine(
-      &zone, MachineType::PointerRepresentation(),
-      InstructionSelector::SupportedMachineOperatorFlags());
-  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
-  WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
+  Graph* graph = jsgraph->graph();
+  CommonOperatorBuilder* common = jsgraph->common();
+  MachineOperatorBuilder* machine = jsgraph->machine();
+  SourcePositionTable* source_position_table =
+      new (jsgraph->zone()) SourcePositionTable(graph);
+  WasmGraphBuilder builder(jsgraph->zone(), jsgraph, function->sig,
+                           source_position_table);
   wasm::FunctionBody body = {
-      module_env, function.sig, module_env->module->module_start,
-      module_env->module->module_start + function.code_start_offset,
-      module_env->module->module_start + function.code_end_offset};
+      module_env, function->sig, module_env->module->module_start,
+      module_env->module->module_start + function->code_start_offset,
+      module_env->module->module_start + function->code_end_offset};
   wasm::TreeResult result =
       wasm::BuildTFGraph(isolate->allocator(), &builder, body);
 
+  if (machine->Is32()) {
+    Int64Lowering r(graph, machine, common, jsgraph->zone(), function->sig);
+    r.LowerGraph();
+  }
+
   if (result.failed()) {
     if (FLAG_trace_wasm_compiler) {
       OFStream os(stdout);
@@ -2711,76 +2922,198 @@
     }
     // Add the function as another context for the exception
     ScopedVector<char> buffer(128);
-    wasm::WasmName name =
-        module_env->module->GetName(function.name_offset, function.name_length);
+    wasm::WasmName name = module_env->module->GetName(function->name_offset,
+                                                      function->name_length);
     SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
-             function.func_index, name.length, name.name);
-    thrower.Failed(buffer.start(), result);
-    return Handle<Code>::null();
+             function->func_index, name.length(), name.start());
+    thrower->Failed(buffer.start(), result);
+    return std::make_pair(nullptr, nullptr);
   }
-
-  int index = static_cast<int>(function.func_index);
+  int index = static_cast<int>(function->func_index);
   if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
     PrintAst(isolate->allocator(), body);
   }
-
   if (FLAG_trace_wasm_decode_time) {
-    decode_ms = decode_timer.Elapsed().InMillisecondsF();
+    *decode_ms = decode_timer.Elapsed().InMillisecondsF();
   }
-
-  base::ElapsedTimer compile_timer;
-  if (FLAG_trace_wasm_decode_time) {
-    compile_timer.Start();
-  }
-  // Run the compiler pipeline to generate machine code.
-  CallDescriptor* descriptor =
-      wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
-  if (machine.Is32()) {
-    descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
-  }
-  Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
-  // add flags here if a meaningful name is helpful for debugging.
-  bool debugging =
-#if DEBUG
-      true;
-#else
-      FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
-#endif
-  const char* func_name = "wasm";
-  Vector<char> buffer;
-  if (debugging) {
-    buffer = Vector<char>::New(128);
-    wasm::WasmName name =
-        module_env->module->GetName(function.name_offset, function.name_length);
-    SNPrintF(buffer, "WASM_function_#%d:%.*s", function.func_index, name.length,
-             name.name);
-    func_name = buffer.start();
-  }
-  CompilationInfo info(func_name, isolate, &zone, flags);
-
-  Handle<Code> code =
-      Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
-  if (debugging) {
-    buffer.Dispose();
-  }
-  if (!code.is_null()) {
-    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "WASM_function",
-                              function.func_index,
-                              module_env->module->GetName(
-                                  function.name_offset, function.name_length));
-  }
-
-  if (FLAG_trace_wasm_decode_time) {
-    double compile_ms = compile_timer.Elapsed().InMillisecondsF();
-    PrintF(
-        "wasm-compile ok: %d bytes, %0.3f ms decode, %d nodes, %0.3f ms "
-        "compile\n",
-        static_cast<int>(function.code_end_offset - function.code_start_offset),
-        decode_ms, static_cast<int>(graph.NodeCount()), compile_ms);
-  }
-  return code;
+  return std::make_pair(jsgraph, source_position_table);
 }
 
+class WasmCompilationUnit {
+ public:
+  WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
+                      wasm::ModuleEnv* module_env,
+                      const wasm::WasmFunction* function, uint32_t index)
+      : thrower_(thrower),
+        isolate_(isolate),
+        module_env_(module_env),
+        function_(function),
+        graph_zone_(new Zone(isolate->allocator())),
+        jsgraph_(new (graph_zone()) JSGraph(
+            isolate, new (graph_zone()) Graph(graph_zone()),
+            new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
+            nullptr,
+            new (graph_zone()) MachineOperatorBuilder(
+                graph_zone(), MachineType::PointerRepresentation(),
+                InstructionSelector::SupportedMachineOperatorFlags()))),
+        compilation_zone_(isolate->allocator()),
+        info_(function->name_length != 0
+                  ? module_env->module->GetNameOrNull(function->name_offset,
+                                                      function->name_length)
+                  : ArrayVector("wasm"),
+              isolate, &compilation_zone_,
+              Code::ComputeFlags(Code::WASM_FUNCTION)),
+        job_(),
+        index_(index),
+        ok_(true) {
+    // Create and cache this node in the main thread.
+    jsgraph_->CEntryStubConstant(1);
+  }
+
+  Zone* graph_zone() { return graph_zone_.get(); }
+
+  void ExecuteCompilation() {
+    // TODO(ahaas): The counters are not thread-safe at the moment.
+    //    HistogramTimerScope wasm_compile_function_time_scope(
+    //        isolate_->counters()->wasm_compile_function_time());
+    if (FLAG_trace_wasm_compiler) {
+      OFStream os(stdout);
+      os << "Compiling WASM function "
+         << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+      os << std::endl;
+    }
+
+    double decode_ms = 0;
+    size_t node_count = 0;
+
+    base::SmartPointer<Zone> graph_zone(graph_zone_.Detach());
+    std::pair<JSGraph*, SourcePositionTable*> graph_result =
+        BuildGraphForWasmFunction(jsgraph_, thrower_, isolate_, module_env_,
+                                  function_, &decode_ms);
+    JSGraph* jsgraph = graph_result.first;
+    SourcePositionTable* source_positions = graph_result.second;
+
+    if (jsgraph == nullptr) {
+      ok_ = false;
+      return;
+    }
+
+    base::ElapsedTimer pipeline_timer;
+    if (FLAG_trace_wasm_decode_time) {
+      node_count = jsgraph->graph()->NodeCount();
+      pipeline_timer.Start();
+    }
+
+    // Run the compiler pipeline to generate machine code.
+    CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
+        &compilation_zone_, function_->sig);
+    if (jsgraph->machine()->Is32()) {
+      descriptor =
+          module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+    }
+    job_.Reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph->graph(),
+                                               descriptor, source_positions));
+    ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
+    // TODO(bradnelson): Improve histogram handling of size_t.
+    // TODO(ahaas): The counters are not thread-safe at the moment.
+    //    isolate_->counters()->wasm_compile_function_peak_memory_bytes()
+    // ->AddSample(
+    //        static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
+
+    if (FLAG_trace_wasm_decode_time) {
+      double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+      PrintF(
+          "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+          "%0.3f ms pipeline\n",
+          static_cast<int>(function_->code_end_offset -
+                           function_->code_start_offset),
+          decode_ms, node_count, pipeline_ms);
+    }
+  }
+
+  Handle<Code> FinishCompilation() {
+    if (!ok_) {
+      return Handle<Code>::null();
+    }
+    if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
+      return Handle<Code>::null();
+    }
+    base::ElapsedTimer compile_timer;
+    if (FLAG_trace_wasm_decode_time) {
+      compile_timer.Start();
+    }
+    Handle<Code> code = info_.code();
+    DCHECK(!code.is_null());
+    DCHECK(code->deoptimization_data() == nullptr ||
+           code->deoptimization_data()->length() == 0);
+    Handle<FixedArray> deopt_data =
+        isolate_->factory()->NewFixedArray(2, TENURED);
+    if (!module_env_->instance->js_object.is_null()) {
+      deopt_data->set(0, *module_env_->instance->js_object);
+    }
+    deopt_data->set(1, Smi::FromInt(function_->func_index));
+    deopt_data->set_length(2);
+    code->set_deoptimization_data(*deopt_data);
+
+    RecordFunctionCompilation(
+        Logger::FUNCTION_TAG, &info_, "WASM_function", function_->func_index,
+        module_env_->module->GetName(function_->name_offset,
+                                     function_->name_length));
+
+    if (FLAG_trace_wasm_decode_time) {
+      double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+      PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
+             static_cast<int>(function_->code_end_offset -
+                              function_->code_start_offset),
+             compile_ms);
+    }
+
+    return code;
+  }
+
+  wasm::ErrorThrower* thrower_;
+  Isolate* isolate_;
+  wasm::ModuleEnv* module_env_;
+  const wasm::WasmFunction* function_;
+  // The graph zone is deallocated at the end of ExecuteCompilation.
+  base::SmartPointer<Zone> graph_zone_;
+  JSGraph* jsgraph_;
+  Zone compilation_zone_;
+  CompilationInfo info_;
+  base::SmartPointer<CompilationJob> job_;
+  uint32_t index_;
+  bool ok_;
+};
+
+WasmCompilationUnit* CreateWasmCompilationUnit(
+    wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
+    const wasm::WasmFunction* function, uint32_t index) {
+  return new WasmCompilationUnit(thrower, isolate, module_env, function, index);
+}
+
+void ExecuteCompilation(WasmCompilationUnit* unit) {
+  unit->ExecuteCompilation();
+}
+
+uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit) {
+  return unit->index_;
+}
+
+Handle<Code> FinishCompilation(WasmCompilationUnit* unit) {
+  Handle<Code> result = unit->FinishCompilation();
+  delete unit;
+  return result;
+}
+
+// Helper function to compile a single function.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
+                                 wasm::ModuleEnv* module_env,
+                                 const wasm::WasmFunction* function) {
+  WasmCompilationUnit* unit =
+      CreateWasmCompilationUnit(thrower, isolate, module_env, function, 0);
+  ExecuteCompilation(unit);
+  return FinishCompilation(unit);
+}
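
The new WasmCompilationUnit splits compilation into three phases: creation and FinishCompilation touch the heap and belong on the main thread, while ExecuteCompilation avoids heap access so it can eventually run concurrently (the commented-out counter code hints at that direction). A hypothetical batching driver sketching how a caller might use the three functions; CompileAllFunctions is not part of this patch and assumes <vector> is available.

#include <vector>

void CompileAllFunctions(wasm::ErrorThrower* thrower, Isolate* isolate,
                         wasm::ModuleEnv* module_env,
                         const std::vector<wasm::WasmFunction>& functions) {
  std::vector<WasmCompilationUnit*> units;
  for (uint32_t i = 0; i < static_cast<uint32_t>(functions.size()); ++i) {
    units.push_back(CreateWasmCompilationUnit(thrower, isolate, module_env,
                                              &functions[i], i));
  }
  // Phase 2 is self-contained per unit and is the candidate for
  // background threads; phases 1 and 3 stay on the main thread.
  for (WasmCompilationUnit* unit : units) ExecuteCompilation(unit);
  for (WasmCompilationUnit* unit : units) {
    Handle<Code> code = FinishCompilation(unit);  // also deletes the unit
    (void)code;
  }
}
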
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index bbcafa7..93c2ae9 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -18,6 +18,9 @@
 class Node;
 class JSGraph;
 class Graph;
+class Operator;
+class SourcePositionTable;
+class WasmCompilationUnit;
 }
 
 namespace wasm {
@@ -33,9 +36,9 @@
 
 namespace compiler {
 // Compiles a single function, producing a code object.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
                                  wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction& function);
+                                 const wasm::WasmFunction* function);
 
 // Wraps a JS function, producing a code object that can be called from WASM.
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
@@ -50,12 +53,24 @@
     Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
     Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
 
+WasmCompilationUnit* CreateWasmCompilationUnit(
+    wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
+    const wasm::WasmFunction* function, uint32_t index);
+
+void ExecuteCompilation(WasmCompilationUnit* unit);
+
+Handle<Code> FinishCompilation(WasmCompilationUnit* unit);
+
+uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit);
+
 // Abstracts details of building TurboFan graph nodes for WASM to separate
 // the WASM decoder from the internal details of TurboFan.
 class WasmTrapHelper;
 class WasmGraphBuilder {
  public:
-  WasmGraphBuilder(Zone* z, JSGraph* g, wasm::FunctionSig* function_signature);
+  WasmGraphBuilder(
+      Zone* z, JSGraph* g, wasm::FunctionSig* function_signature,
+      compiler::SourcePositionTable* source_position_table = nullptr);
 
   Node** Buffer(size_t count) {
     if (count > cur_bufsize_) {
@@ -78,17 +93,20 @@
   Node* Merge(unsigned count, Node** controls);
   Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
   Node* EffectPhi(unsigned count, Node** effects, Node* control);
+  Node* NumberConstant(int32_t value);
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
   Node* Float32Constant(float value);
   Node* Float64Constant(double value);
-  Node* Constant(Handle<Object> value);
-  Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right);
-  Node* Unop(wasm::WasmOpcode opcode, Node* input);
+  Node* HeapConstant(Handle<HeapObject> value);
+  Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
+              wasm::WasmCodePosition position = wasm::kNoCodePosition);
+  Node* Unop(wasm::WasmOpcode opcode, Node* input,
+             wasm::WasmCodePosition position = wasm::kNoCodePosition);
   unsigned InputCount(Node* node);
   bool IsPhiWithMerge(Node* phi, Node* merge);
   void AppendToMerge(Node* merge, Node* from);
-  void AppendToPhi(Node* merge, Node* phi, Node* from);
+  void AppendToPhi(Node* phi, Node* from);
 
   //-----------------------------------------------------------------------
   // Operations that read and/or write {control} and {effect}.
@@ -99,14 +117,18 @@
   Node* IfDefault(Node* sw);
   Node* Return(unsigned count, Node** vals);
   Node* ReturnVoid();
-  Node* Unreachable();
+  Node* Unreachable(wasm::WasmCodePosition position);
 
-  Node* CallDirect(uint32_t index, Node** args);
-  Node* CallImport(uint32_t index, Node** args);
-  Node* CallIndirect(uint32_t index, Node** args);
+  Node* CallDirect(uint32_t index, Node** args,
+                   wasm::WasmCodePosition position);
+  Node* CallImport(uint32_t index, Node** args,
+                   wasm::WasmCodePosition position);
+  Node* CallIndirect(uint32_t index, Node** args,
+                     wasm::WasmCodePosition position);
   void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
   void BuildWasmToJSWrapper(Handle<JSFunction> function,
                             wasm::FunctionSig* sig);
+
   Node* ToJS(Node* node, Node* context, wasm::LocalType type);
   Node* FromJS(Node* node, Node* context, wasm::LocalType type);
   Node* Invert(Node* node);
@@ -119,8 +141,9 @@
   Node* LoadGlobal(uint32_t index);
   Node* StoreGlobal(uint32_t index, Node* val);
   Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
-                uint32_t offset);
-  Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val);
+                uint32_t offset, wasm::WasmCodePosition position);
+  Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val,
+                 wasm::WasmCodePosition position);
 
   static void PrintDebugName(Node* node);
 
@@ -137,6 +160,8 @@
 
   void Int64LoweringForTesting();
 
+  void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
+
  private:
   static const int kDefaultBufferSize = 16;
   friend class WasmTrapHelper;
@@ -155,6 +180,9 @@
 
   WasmTrapHelper* trap_;
   wasm::FunctionSig* function_signature_;
+  SetOncePointer<const Operator> allocate_heap_number_operator_;
+
+  compiler::SourcePositionTable* source_position_table_ = nullptr;
 
   // Internal helper methods.
   JSGraph* jsgraph() { return jsgraph_; }
@@ -162,13 +190,15 @@
 
   Node* String(const char* string);
   Node* MemBuffer(uint32_t offset);
-  void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+  void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
+                      wasm::WasmCodePosition position);
 
   Node* MaskShiftCount32(Node* node);
   Node* MaskShiftCount64(Node* node);
 
   Node* BuildCCall(MachineSignature* sig, Node** args);
-  Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+  Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+                      wasm::WasmCodePosition position);
 
   Node* BuildF32Neg(Node* input);
   Node* BuildF64Neg(Node* input);
@@ -178,14 +208,17 @@
   Node* BuildF32Max(Node* left, Node* right);
   Node* BuildF64Min(Node* left, Node* right);
   Node* BuildF64Max(Node* left, Node* right);
-  Node* BuildI32SConvertF32(Node* input);
-  Node* BuildI32SConvertF64(Node* input);
-  Node* BuildI32UConvertF32(Node* input);
-  Node* BuildI32UConvertF64(Node* input);
+  Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
+  Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
+  Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
+  Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position);
   Node* BuildI32Ctz(Node* input);
   Node* BuildI32Popcnt(Node* input);
   Node* BuildI64Ctz(Node* input);
   Node* BuildI64Popcnt(Node* input);
+  Node* BuildBitCountingCall(Node* input, ExternalReference ref,
+                             MachineRepresentation input_type);
+
   Node* BuildCFuncInstruction(ExternalReference ref, MachineType type,
                               Node* input0, Node* input1 = nullptr);
   Node* BuildF32Trunc(Node* input);
@@ -223,23 +256,52 @@
   Node* BuildFloatToIntConversionInstruction(
       Node* input, ExternalReference ref,
       MachineRepresentation parameter_representation,
-      const MachineType result_type);
-  Node* BuildI64SConvertF32(Node* input);
-  Node* BuildI64UConvertF32(Node* input);
-  Node* BuildI64SConvertF64(Node* input);
-  Node* BuildI64UConvertF64(Node* input);
+      const MachineType result_type, wasm::WasmCodePosition position);
+  Node* BuildI64SConvertF32(Node* input, wasm::WasmCodePosition position);
+  Node* BuildI64UConvertF32(Node* input, wasm::WasmCodePosition position);
+  Node* BuildI64SConvertF64(Node* input, wasm::WasmCodePosition position);
+  Node* BuildI64UConvertF64(Node* input, wasm::WasmCodePosition position);
 
-  Node* BuildI32DivS(Node* left, Node* right);
-  Node* BuildI32RemS(Node* left, Node* right);
-  Node* BuildI32DivU(Node* left, Node* right);
-  Node* BuildI32RemU(Node* left, Node* right);
+  Node* BuildI32DivS(Node* left, Node* right, wasm::WasmCodePosition position);
+  Node* BuildI32RemS(Node* left, Node* right, wasm::WasmCodePosition position);
+  Node* BuildI32DivU(Node* left, Node* right, wasm::WasmCodePosition position);
+  Node* BuildI32RemU(Node* left, Node* right, wasm::WasmCodePosition position);
 
-  Node* BuildI64DivS(Node* left, Node* right);
-  Node* BuildI64RemS(Node* left, Node* right);
-  Node* BuildI64DivU(Node* left, Node* right);
-  Node* BuildI64RemU(Node* left, Node* right);
+  Node* BuildI64DivS(Node* left, Node* right, wasm::WasmCodePosition position);
+  Node* BuildI64RemS(Node* left, Node* right, wasm::WasmCodePosition position);
+  Node* BuildI64DivU(Node* left, Node* right, wasm::WasmCodePosition position);
+  Node* BuildI64RemU(Node* left, Node* right, wasm::WasmCodePosition position);
   Node* BuildDiv64Call(Node* left, Node* right, ExternalReference ref,
-                       MachineType result_type, int trap_zero);
+                       MachineType result_type, int trap_zero,
+                       wasm::WasmCodePosition position);
+
+  Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
+                                Node* control);
+  Node* BuildChangeInt32ToTagged(Node* value);
+  Node* BuildChangeFloat64ToTagged(Node* value);
+  Node* BuildChangeTaggedToFloat64(Node* value);
+
+  Node* BuildChangeInt32ToSmi(Node* value);
+  Node* BuildChangeSmiToInt32(Node* value);
+  Node* BuildChangeSmiToFloat64(Node* value);
+  Node* BuildTestNotSmi(Node* value);
+  Node* BuildSmiShiftBitsConstant();
+
+  Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control);
+  Node* BuildLoadHeapNumberValue(Node* value, Node* control);
+  Node* BuildHeapNumberValueIndexConstant();
+
+  // Asm.js specific functionality.
+  Node* BuildI32AsmjsSConvertF32(Node* input);
+  Node* BuildI32AsmjsSConvertF64(Node* input);
+  Node* BuildI32AsmjsUConvertF32(Node* input);
+  Node* BuildI32AsmjsUConvertF64(Node* input);
+  Node* BuildI32AsmjsDivS(Node* left, Node* right);
+  Node* BuildI32AsmjsRemS(Node* left, Node* right);
+  Node* BuildI32AsmjsDivU(Node* left, Node* right);
+  Node* BuildI32AsmjsRemU(Node* left, Node* right);
+  Node* BuildAsmjsLoadMem(MachineType type, Node* index);
+  Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
 
   Node** Realloc(Node** buffer, size_t old_count, size_t new_count) {
     Node** buf = Buffer(new_count);
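
The wasm-compiler.h hunks above all thread a wasm::WasmCodePosition (the byte offset of the originating wasm instruction) through the builder methods that can trap, and record it via SetSourcePosition in a compiler::SourcePositionTable. A minimal standalone sketch of that pattern, with invented stand-ins for V8's Node and graph machinery:

    #include <deque>
    #include <unordered_map>

    using WasmCodePosition = int;  // byte offset within the wasm function body
    constexpr WasmCodePosition kNoCodePosition = -1;

    struct Node { int id; };

    class SourcePositionTable {
     public:
      void Set(const Node* node, WasmCodePosition pos) { map_[node->id] = pos; }
      WasmCodePosition Get(const Node* node) const {
        auto it = map_.find(node->id);
        return it == map_.end() ? kNoCodePosition : it->second;
      }
     private:
      std::unordered_map<int, WasmCodePosition> map_;
    };

    class Builder {
     public:
      // Operations that can trap (here: signed division) take the position
      // of the originating wasm instruction and record it for their node.
      Node* Int32DivS(Node* left, Node* right, WasmCodePosition position) {
        (void)left; (void)right;
        Node* div = NewNode();
        SetSourcePosition(div, position);
        return div;
      }
      void SetSourcePosition(Node* node, WasmCodePosition position) {
        table_.Set(node, position);
      }
     private:
      Node* NewNode() {
        nodes_.push_back({static_cast<int>(nodes_.size())});
        return &nodes_.back();
      }
      std::deque<Node> nodes_;  // deque keeps element addresses stable
      SourcePositionTable table_;
    };

When a trap fires at runtime, the recorded position lets it be reported as a wasm byte offset rather than a bare machine address.
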
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index f0e14ce..41acf55 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@
 // ===========================================================================
 // == ia32 ===================================================================
 // ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
 #define GP_RETURN_REGISTERS eax, edx
 #define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
 #define FP_RETURN_REGISTERS xmm1, xmm2
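
The one-line ia32 change above adds esi as a fifth general-purpose parameter register for wasm calls. A toy allocator showing the resulting assignment; the register list comes from the define, but the stack-slot layout is illustrative only, not the real wasm linkage:

    #include <cstdio>

    int main() {
      const char* gp_params[] = {"eax", "edx", "ecx", "ebx", "esi"};
      const int num_gp = sizeof(gp_params) / sizeof(gp_params[0]);
      for (int i = 0; i < 7; ++i) {
        if (i < num_gp) {
          std::printf("param %d -> %s\n", i, gp_params[i]);
        } else {
          std::printf("param %d -> stack slot %d\n", i, i - num_gp);
        }
      }
      return 0;
    }
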
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 2e4eccb..a90a584 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -44,11 +44,15 @@
       DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
       return Immediate(0);
     }
+    if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+        constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+      return Immediate(constant.ToInt32(), constant.rmode());
+    }
     return Immediate(constant.ToInt32());
   }
 
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
   }
 
@@ -341,31 +345,28 @@
     }                                                       \
   } while (0)
 
-
 #define ASSEMBLE_SSE_BINOP(asm_instr)                                   \
   do {                                                                  \
-    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
+    if (instr->InputAt(1)->IsFPRegister()) {                            \
       __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
     } else {                                                            \
       __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
     }                                                                   \
   } while (0)
 
-
 #define ASSEMBLE_SSE_UNOP(asm_instr)                                    \
   do {                                                                  \
-    if (instr->InputAt(0)->IsDoubleRegister()) {                        \
+    if (instr->InputAt(0)->IsFPRegister()) {                            \
       __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
     } else {                                                            \
       __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0));        \
     }                                                                   \
   } while (0)
 
-
 #define ASSEMBLE_AVX_BINOP(asm_instr)                                  \
   do {                                                                 \
     CpuFeatureScope avx_scope(masm(), AVX);                            \
-    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
+    if (instr->InputAt(1)->IsFPRegister()) {                           \
       __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                    i.InputDoubleRegister(1));                          \
     } else {                                                           \
@@ -374,13 +375,12 @@
     }                                                                  \
   } while (0)
 
-
 #define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
   do {                                                                       \
     auto result = i.OutputDoubleRegister();                                  \
     auto buffer = i.InputRegister(0);                                        \
     auto index1 = i.InputRegister(1);                                        \
-    auto index2 = i.InputInt32(2);                                           \
+    auto index2 = i.InputUint32(2);                                          \
     OutOfLineCode* ool;                                                      \
     if (instr->InputAt(3)->IsRegister()) {                                   \
       auto length = i.InputRegister(3);                                      \
@@ -388,9 +388,9 @@
       __ cmpl(index1, length);                                               \
       ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
     } else {                                                                 \
-      auto length = i.InputInt32(3);                                         \
+      auto length = i.InputUint32(3);                                        \
       DCHECK_LE(index2, length);                                             \
-      __ cmpq(index1, Immediate(length - index2));                           \
+      __ cmpl(index1, Immediate(length - index2));                           \
       class OutOfLineLoadFloat final : public OutOfLineCode {                \
        public:                                                               \
         OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
@@ -427,13 +427,12 @@
     __ bind(ool->exit());                                                    \
   } while (false)
 
-
 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
   do {                                                                         \
     auto result = i.OutputRegister();                                          \
     auto buffer = i.InputRegister(0);                                          \
     auto index1 = i.InputRegister(1);                                          \
-    auto index2 = i.InputInt32(2);                                             \
+    auto index2 = i.InputUint32(2);                                            \
     OutOfLineCode* ool;                                                        \
     if (instr->InputAt(3)->IsRegister()) {                                     \
       auto length = i.InputRegister(3);                                        \
@@ -441,9 +440,9 @@
       __ cmpl(index1, length);                                                 \
       ool = new (zone()) OutOfLineLoadZero(this, result);                      \
     } else {                                                                   \
-      auto length = i.InputInt32(3);                                           \
+      auto length = i.InputUint32(3);                                          \
       DCHECK_LE(index2, length);                                               \
-      __ cmpq(index1, Immediate(length - index2));                             \
+      __ cmpl(index1, Immediate(length - index2));                             \
       class OutOfLineLoadInteger final : public OutOfLineCode {                \
        public:                                                                 \
         OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
@@ -483,12 +482,11 @@
     __ bind(ool->exit());                                                      \
   } while (false)
 
-
 #define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
   do {                                                                       \
     auto buffer = i.InputRegister(0);                                        \
     auto index1 = i.InputRegister(1);                                        \
-    auto index2 = i.InputInt32(2);                                           \
+    auto index2 = i.InputUint32(2);                                          \
     auto value = i.InputDoubleRegister(4);                                   \
     if (instr->InputAt(3)->IsRegister()) {                                   \
       auto length = i.InputRegister(3);                                      \
@@ -499,9 +497,9 @@
       __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
       __ bind(&done);                                                        \
     } else {                                                                 \
-      auto length = i.InputInt32(3);                                         \
+      auto length = i.InputUint32(3);                                        \
       DCHECK_LE(index2, length);                                             \
-      __ cmpq(index1, Immediate(length - index2));                           \
+      __ cmpl(index1, Immediate(length - index2));                           \
       class OutOfLineStoreFloat final : public OutOfLineCode {               \
        public:                                                               \
         OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
@@ -537,12 +535,11 @@
     }                                                                        \
   } while (false)
 
-
 #define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
   do {                                                                         \
     auto buffer = i.InputRegister(0);                                          \
     auto index1 = i.InputRegister(1);                                          \
-    auto index2 = i.InputInt32(2);                                             \
+    auto index2 = i.InputUint32(2);                                            \
     if (instr->InputAt(3)->IsRegister()) {                                     \
       auto length = i.InputRegister(3);                                        \
       DCHECK_EQ(0, index2);                                                    \
@@ -552,9 +549,9 @@
       __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
       __ bind(&done);                                                          \
     } else {                                                                   \
-      auto length = i.InputInt32(3);                                           \
+      auto length = i.InputUint32(3);                                          \
       DCHECK_LE(index2, length);                                               \
-      __ cmpq(index1, Immediate(length - index2));                             \
+      __ cmpl(index1, Immediate(length - index2));                             \
       class OutOfLineStoreInteger final : public OutOfLineCode {               \
        public:                                                                 \
         OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
@@ -590,7 +587,6 @@
     }                                                                          \
   } while (false)
 
-
 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
   do {                                                           \
     if (instr->InputAt(4)->IsRegister()) {                       \
@@ -607,8 +603,6 @@
   __ popq(rbp);
 }
 
-void CodeGenerator::AssembleSetupStackPointer() {}
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -656,7 +650,8 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   X64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -695,6 +690,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!HasImmediateInput(instr, 0));
+      Register reg = i.InputRegister(0);
+      __ jmp(reg);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -768,7 +772,9 @@
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -791,10 +797,13 @@
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
       auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+      // We use Cvttsd2siq instead of Cvttsd2si for performance reasons. The
+      // movl below clears the sign extension it leaves in the upper 32 bits.
       __ Cvttsd2siq(result, input);
       __ cmpq(result, Immediate(1));
       __ j(overflow, ool->entry());
       __ bind(ool->exit());
+      __ movl(result, result);
       break;
     }
     case kArchStoreWithWriteBarrier: {
@@ -1047,14 +1056,14 @@
       break;
     }
     case kSSEFloat32ToInt32:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
         __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     case kSSEFloat32ToUint32: {
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
         __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1145,14 +1154,14 @@
       ASSEMBLE_SSE_UNOP(Cvtsd2ss);
       break;
     case kSSEFloat64ToInt32:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
         __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     case kSSEFloat64ToUint32: {
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
         __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1163,7 +1172,7 @@
       break;
     }
     case kSSEFloat32ToInt64:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
         __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1173,7 +1182,7 @@
         Label done;
         Label fail;
         __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
-        if (instr->InputAt(0)->IsDoubleRegister()) {
+        if (instr->InputAt(0)->IsFPRegister()) {
           __ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
         } else {
           __ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
@@ -1192,7 +1201,7 @@
       }
       break;
     case kSSEFloat64ToInt64:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
       } else {
         __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
@@ -1202,7 +1211,7 @@
         Label done;
         Label fail;
         __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
-        if (instr->InputAt(0)->IsDoubleRegister()) {
+        if (instr->InputAt(0)->IsFPRegister()) {
           __ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
         } else {
           __ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
@@ -1228,7 +1237,7 @@
       }
       // There does not exist a Float32ToUint64 instruction, so we have to use
       // the Float32ToInt64 instruction.
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
         __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1241,7 +1250,7 @@
       // input value was not within the positive int64 range. We subtract 2^64
       // and convert it again to see if it is within the uint64 range.
       __ Move(kScratchDoubleReg, -9223372036854775808.0f);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
       } else {
         __ addss(kScratchDoubleReg, i.InputOperand(0));
@@ -1271,7 +1280,7 @@
       }
       // There does not exist a Float64ToUint64 instruction, so we have to use
       // the Float64ToInt64 instruction.
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
         __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1284,7 +1293,7 @@
       // input value was not within the positive int64 range. We subtract 2^64
       // and convert it again to see if it is within the uint64 range.
       __ Move(kScratchDoubleReg, -9223372036854775808.0);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
       } else {
         __ addsd(kScratchDoubleReg, i.InputOperand(0));
@@ -1369,14 +1378,14 @@
       __ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
       break;
     case kSSEFloat64ExtractLowWord32:
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ movl(i.OutputRegister(), i.InputOperand(0));
       } else {
         __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
       }
       break;
     case kSSEFloat64ExtractHighWord32:
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
       } else {
         __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
@@ -1405,7 +1414,7 @@
       break;
     case kAVXFloat32Cmp: {
       CpuFeatureScope avx_scope(masm(), AVX);
-      if (instr->InputAt(1)->IsDoubleRegister()) {
+      if (instr->InputAt(1)->IsFPRegister()) {
         __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
       } else {
         __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
@@ -1435,7 +1444,7 @@
       break;
     case kAVXFloat64Cmp: {
       CpuFeatureScope avx_scope(masm(), AVX);
-      if (instr->InputAt(1)->IsDoubleRegister()) {
+      if (instr->InputAt(1)->IsFPRegister()) {
         __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
       } else {
         __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
@@ -1468,7 +1477,7 @@
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
       __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                   i.InputDoubleRegister(0));
       } else {
@@ -1482,7 +1491,7 @@
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
       __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                   i.InputDoubleRegister(0));
       } else {
@@ -1496,7 +1505,7 @@
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
       __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                   i.InputDoubleRegister(0));
       } else {
@@ -1510,7 +1519,7 @@
       CpuFeatureScope avx_scope(masm(), AVX);
       __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
       __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                   i.InputDoubleRegister(0));
       } else {
@@ -1612,14 +1621,14 @@
       }
       break;
     case kX64BitcastFI:
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ movl(i.OutputRegister(), i.InputOperand(0));
       } else {
         __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
       }
       break;
     case kX64BitcastDL:
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ movq(i.OutputRegister(), i.InputOperand(0));
       } else {
         __ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -1690,7 +1699,7 @@
         if (instr->InputAt(0)->IsRegister()) {
           __ pushq(i.InputRegister(0));
           frame_access_state()->IncreaseSPDelta(1);
-        } else if (instr->InputAt(0)->IsDoubleRegister()) {
+        } else if (instr->InputAt(0)->IsFPRegister()) {
           // TODO(titzer): use another machine instruction?
           __ subq(rsp, Immediate(kDoubleSize));
           frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1710,6 +1719,24 @@
       }
       break;
     }
+    case kX64Xchgb: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchgb(i.InputRegister(index), operand);
+      break;
+    }
+    case kX64Xchgw: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchgw(i.InputRegister(index), operand);
+      break;
+    }
+    case kX64Xchgl: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchgl(i.InputRegister(index), operand);
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
       break;
@@ -1755,7 +1782,18 @@
     case kX64StackCheck:
       __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
       break;
+    case kAtomicLoadInt8:
+    case kAtomicLoadUint8:
+    case kAtomicLoadInt16:
+    case kAtomicLoadUint16:
+    case kAtomicLoadWord32:
+    case kAtomicStoreWord8:
+    case kAtomicStoreWord16:
+    case kAtomicStoreWord32:
+      UNREACHABLE();  // Won't be generated by instruction selector.
+      break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
 
 
@@ -1918,12 +1956,13 @@
   __ jmp(Operand(kScratchRegister, input, times_8, 0));
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
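
AssembleDeoptimizerCall now returns CodeGenerator::CodeGenResult so that exhausting the deoptimization entry table becomes a reportable failure instead of a hard crash; the kArchDeoptimize case above propagates the result out of code generation. A sketch of the control flow, with an assumed table capacity standing in for the real limit:

    enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

    using Address = unsigned char*;

    // The real table lives in the Deoptimizer; 8192 here is an assumed,
    // purely illustrative capacity.
    Address GetDeoptimizationEntry(int deoptimization_id) {
      constexpr int kMaxEntries = 8192;
      static unsigned char table[kMaxEntries];
      return deoptimization_id < kMaxEntries ? &table[deoptimization_id]
                                             : nullptr;  // table exhausted
    }

    CodeGenResult AssembleDeoptimizerCall(int deoptimization_id) {
      Address entry = GetDeoptimizationEntry(deoptimization_id);
      if (entry == nullptr) return kTooManyDeoptimizationBailouts;
      // ... emit call(entry, RelocInfo::RUNTIME_ENTRY) ...
      return kSuccess;
    }
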
 
 
@@ -1933,8 +1972,31 @@
 
 }  // namespace
 
+void CodeGenerator::FinishFrame(Frame* frame) {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
 
-void CodeGenerator::AssemblePrologue() {
+  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+  if (saves_fp != 0) {
+    frame->AlignSavedCalleeRegisterSlots();
+    if (saves_fp != 0) {  // Save callee-saved XMM registers.
+      const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+      frame->AllocateSavedCalleeRegisterSlots(saves_fp_count *
+                                              (kQuadWordSize / kPointerSize));
+    }
+  }
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {  // Save callee-saved registers.
+    int count = 0;
+    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+      if (((1 << i) & saves)) {
+        ++count;
+      }
+    }
+    frame->AllocateSavedCalleeRegisterSlots(count);
+  }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsCFunctionCall()) {
@@ -1946,7 +2008,8 @@
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
   }
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  int shrink_slots = frame()->GetSpillSlotCount();
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1957,16 +2020,12 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -=
-        static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
+    shrink_slots -= static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
   }
 
   const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
-  if (saves_fp != 0) {
-    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
-  }
-  if (stack_shrink_slots > 0) {
-    __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
+  if (shrink_slots > 0) {
+    __ subq(rsp, Immediate(shrink_slots * kPointerSize));
   }
 
   if (saves_fp != 0) {  // Save callee-saved XMM registers.
@@ -1982,8 +2041,6 @@
                 XMMRegister::from_code(i));
       slot_idx++;
     }
-    frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
-                                              (kQuadWordSize / kPointerSize));
   }
 
   const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1991,7 +2048,6 @@
     for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
       if (!((1 << i) & saves)) continue;
       __ pushq(Register::from_code(i));
-      frame()->AllocateSavedCalleeRegisterSlots(1);
     }
   }
 }
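
The AssemblePrologue rework above splits frame construction into two phases: FinishFrame only accounts for callee-saved slots, so the frame layout is final before any code is emitted, and AssembleConstructFrame then emits against that fixed layout (note the AllocateSavedCalleeRegisterSlots calls deleted from the emission loops above). A compact model using the real hook names but simplified types:

    #include <cstdint>

    struct Frame {
      int reserved_slots = 0;
      void AllocateSavedCalleeRegisterSlots(int count) { reserved_slots += count; }
    };

    int CountBits(uint32_t mask) {
      int n = 0;
      for (; mask != 0; mask &= mask - 1) ++n;
      return n;
    }

    // Phase 1: pure bookkeeping, no instructions emitted. Each XMM register
    // needs two pointer-sized slots on x64 (kQuadWordSize / kPointerSize).
    void FinishFrame(Frame* frame, uint32_t saves, uint32_t saves_fp) {
      if (saves_fp != 0)
        frame->AllocateSavedCalleeRegisterSlots(CountBits(saves_fp) * 2);
      if (saves != 0)
        frame->AllocateSavedCalleeRegisterSlots(CountBits(saves));
    }

    // Phase 2: emission only; the frame size is already final.
    void AssembleConstructFrame(const Frame& frame) {
      // emit: sub rsp, Immediate(frame.reserved_slots * kPointerSize); pushes...
      (void)frame;
    }
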
@@ -2077,12 +2133,27 @@
       Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                                : kScratchRegister;
       switch (src.type()) {
-        case Constant::kInt32:
-          // TODO(dcarney): don't need scratch in this case.
-          __ Set(dst, src.ToInt32());
+        case Constant::kInt32: {
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+            __ movq(dst, src.ToInt64(), src.rmode());
+          } else {
+            // TODO(dcarney): don't need scratch in this case.
+            int32_t value = src.ToInt32();
+            if (value == 0) {
+              __ xorl(dst, dst);
+            } else {
+              __ movl(dst, Immediate(value));
+            }
+          }
           break;
+        }
         case Constant::kInt64:
-          __ Set(dst, src.ToInt64());
+          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+            __ movq(dst, src.ToInt64(), src.rmode());
+          } else {
+            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            __ Set(dst, src.ToInt64());
+          }
           break;
         case Constant::kFloat32:
           __ Move(dst,
@@ -2118,38 +2189,38 @@
     } else if (src.type() == Constant::kFloat32) {
       // TODO(turbofan): Can we do better here?
       uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         __ Move(g.ToDoubleRegister(destination), src_const);
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         Operand dst = g.ToOperand(destination);
         __ movl(dst, Immediate(src_const));
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
       uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         __ Move(g.ToDoubleRegister(destination), src_const);
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         __ movq(kScratchRegister, src_const);
         __ movq(g.ToOperand(destination), kScratchRegister);
       }
     }
-  } else if (source->IsDoubleRegister()) {
+  } else if (source->IsFPRegister()) {
     XMMRegister src = g.ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       XMMRegister dst = g.ToDoubleRegister(destination);
       __ Movapd(dst, src);
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsFPStackSlot());
       Operand dst = g.ToOperand(destination);
       __ Movsd(dst, src);
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     Operand src = g.ToOperand(source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       XMMRegister dst = g.ToDoubleRegister(destination);
       __ Movsd(dst, src);
     } else {
@@ -2186,8 +2257,7 @@
     dst = g.ToOperand(destination);
     __ popq(dst);
   } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
-             (source->IsDoubleStackSlot() &&
-              destination->IsDoubleStackSlot())) {
+             (source->IsFPStackSlot() && destination->IsFPStackSlot())) {
     // Memory-memory.
     Register tmp = kScratchRegister;
     Operand src = g.ToOperand(source);
@@ -2200,7 +2270,7 @@
     frame_access_state()->IncreaseSPDelta(-1);
     dst = g.ToOperand(destination);
     __ popq(dst);
-  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+  } else if (source->IsFPRegister() && destination->IsFPRegister()) {
     // XMM register-register swap. We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister src = g.ToDoubleRegister(source);
@@ -2208,7 +2278,7 @@
     __ Movapd(xmm0, src);
     __ Movapd(src, dst);
     __ Movapd(dst, xmm0);
-  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+  } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
     // XMM register-memory swap.  We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister src = g.ToDoubleRegister(source);
@@ -2230,9 +2300,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
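
Two recurring edits in this file pair up: the checked-access macros now read offsets and lengths as Uint32, and the limit comparison uses cmpl instead of cmpq. The point is that cmpq sign-extends its 32-bit immediate, which silently breaks the unsigned bounds check once a length exceeds INT32_MAX. A small demonstration of the failure mode, assuming (as the macros suggest) that out-of-bounds is signalled by an unsigned above-or-equal result:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t length = 0x90000000u;  // checked-access limit above INT32_MAX
      uint32_t index  = 0xA0000000u;  // clearly out of bounds
      // cmpq sign-extends the 32-bit immediate, so the 64-bit unsigned limit
      // becomes 0xFFFFFFFF90000000 and the bad index still looks "below" it:
      uint64_t q_limit = static_cast<uint64_t>(
          static_cast<int64_t>(static_cast<int32_t>(length)));
      bool cmpq_catches = static_cast<uint64_t>(index) >= q_limit;  // false
      // cmpl compares genuine 32-bit unsigned values and catches it:
      bool cmpl_catches = index >= length;                          // true
      std::printf("cmpq flags OOB: %d, cmpl flags OOB: %d\n",
                  cmpq_catches, cmpl_catches);
      return 0;
    }
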
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index bd19386..638e77b 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -141,7 +141,10 @@
   V(X64Inc32)                      \
   V(X64Push)                       \
   V(X64Poke)                       \
-  V(X64StackCheck)
+  V(X64StackCheck)                 \
+  V(X64Xchgb)                      \
+  V(X64Xchgw)                      \
+  V(X64Xchgl)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
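
The three new Xchg opcodes back the sequentially consistent atomic stores selected below. On x86 a plain mov is not a seq_cst store; xchg with a memory operand carries an implicit LOCK prefix, so one instruction performs both the store and the required full fence. The same lowering can be observed from standard C++:

    #include <atomic>

    std::atomic<int> cell;

    void StoreSeqCst(int v) {
      cell.store(v, std::memory_order_seq_cst);  // typically: xchg [cell], reg
    }

    void StoreRelaxed(int v) {
      cell.store(v, std::memory_order_relaxed);  // a plain mov suffices
    }

GCC and Clang typically compile the first store to exactly this xchg and the second to a plain mov.
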
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index 3c31965..6133bd8 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -168,6 +168,11 @@
     case kX64Poke:
       return kHasSideEffect;
 
+    case kX64Xchgb:
+    case kX64Xchgw:
+    case kX64Xchgl:
+      return kIsLoadOperation | kHasSideEffect;
+
 #define CASE(Name) case k##Name:
     COMMON_ARCH_OPCODE_LIST(CASE)
 #undef CASE
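
Scheduling-wise, xchg is marked as both a load and a side effect: it reads the previous memory value and writes the new one, so it may move in neither direction across other memory operations. A sketch of that contract; only the flag names come from the scheduler, the values are invented:

    #include <cstdint>

    enum Flags : uint32_t {
      kNoOpcodeFlags   = 0,
      kIsLoadOperation = 1u << 0,  // must not move above earlier side effects
      kHasSideEffect   = 1u << 1,  // later loads/effects must not move above it
    };

    uint32_t GetInstructionFlags(bool is_xchg) {
      // xchg both reads the old memory value and writes the new one, so it
      // is ordered against neighbouring memory operations in both directions.
      return is_xchg ? (kIsLoadOperation | kHasSideEffect) : kNoOpcodeFlags;
    }
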
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index ea1d48b..47deb02 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -22,6 +22,7 @@
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
+      case IrOpcode::kRelocatableInt32Constant:
         return true;
       case IrOpcode::kInt64Constant: {
         const int64_t value = OpParameter<int64_t>(node);
@@ -36,11 +37,15 @@
     }
   }
 
-  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+                          int effect_level) {
     if (input->opcode() != IrOpcode::kLoad ||
         !selector()->CanCover(node, input)) {
       return false;
     }
+    if (effect_level != selector()->GetEffectLevel(input)) {
+      return false;
+    }
     MachineRepresentation rep =
         LoadRepresentationOf(input->op()).representation();
     switch (opcode) {
@@ -1140,15 +1145,8 @@
   VisitRO(this, node, kSSEFloat64ToFloat32);
 }
 
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      return VisitRR(this, node, kArchTruncateDoubleToI);
-    case TruncationMode::kRoundToZero:
-      return VisitRO(this, node, kSSEFloat64ToInt32);
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+  VisitRR(this, node, kArchTruncateDoubleToI);
 }
 
 
@@ -1174,6 +1172,9 @@
   Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
 
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  VisitRO(this, node, kSSEFloat64ToInt32);
+}
 
 void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
   X64OperandGenerator g(this);
@@ -1255,6 +1256,9 @@
   VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
+}
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
@@ -1314,6 +1318,9 @@
   VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
+}
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
@@ -1545,16 +1552,22 @@
 
   // If one of the two inputs is an immediate, make sure it's on the right, or
   // if one of the two inputs is a memory operand, make sure it's on the left.
+  int effect_level = selector->GetEffectLevel(node);
+  if (cont->IsBranch()) {
+    effect_level = selector->GetEffectLevel(
+        cont->true_block()->PredecessorAt(0)->control_input());
+  }
+
   if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
-      (g.CanBeMemoryOperand(opcode, node, right) &&
-       !g.CanBeMemoryOperand(opcode, node, left))) {
+      (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
+       !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (g.CanBeMemoryOperand(opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
       return VisitCompareWithMemoryOperand(selector, opcode, left,
                                            g.UseImmediate(right), cont);
     }
@@ -1563,7 +1576,7 @@
   }
 
   // Match memory operands on left side of comparison.
-  if (g.CanBeMemoryOperand(opcode, node, left)) {
+  if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
     return VisitCompareWithMemoryOperand(selector, opcode, left,
                                          g.UseRegister(right), cont);
   }
@@ -2023,6 +2036,52 @@
        g.UseRegister(left), g.Use(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+         load_rep.representation() == MachineRepresentation::kWord16 ||
+         load_rep.representation() == MachineRepresentation::kWord32);
+  USE(load_rep);
+  VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  X64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kX64Xchgb;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kX64Xchgw;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kX64Xchgl;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  AddressingMode addressing_mode;
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  if (g.CanBeImmediate(index)) {
+    inputs[input_count++] = g.UseImmediate(index);
+    addressing_mode = kMode_MRI;
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(index);
+    addressing_mode = kMode_MR1;
+  }
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
 
 // static
 MachineOperatorBuilder::Flags
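
Two selector changes above deserve spelling out. CanBeMemoryOperand now additionally requires the load and its user to sit at the same effect level, so a load is folded into, say, a cmp only when no store or call intervenes; and VisitAtomicStore lowers word-sized atomic stores to the new xchg opcodes with unique registers for base, index, and value. A toy model of the effect-level rule, with an invented FakeNode in place of V8's Node:

    #include <cassert>

    struct FakeNode {
      int effect_level;  // bumped by every store, call, or other effect
      bool is_load;
    };

    bool CanFoldLoad(const FakeNode& user, const FakeNode& load) {
      // Folding is only safe when no effect separates the load from its
      // user; otherwise cmp [mem], imm could observe a newer memory value.
      return load.is_load && load.effect_level == user.effect_level;
    }

    int main() {
      FakeNode load{3, true};
      FakeNode cmp_same{3, false};         // nothing happened in between
      FakeNode cmp_after_store{4, false};  // an intervening store bumped it
      assert(CanFoldLoad(cmp_same, load));
      assert(!CanFoldLoad(cmp_after_store, load));
      return 0;
    }
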
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index da7fdb4..0eef24f 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -42,7 +42,7 @@
       DCHECK(extra == 0);
       return Operand(ToRegister(op));
     }
-    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
     return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
   }
 
@@ -53,12 +53,18 @@
   }
 
   Operand HighOperand(InstructionOperand* op) {
-    DCHECK(op->IsDoubleStackSlot());
+    DCHECK(op->IsFPStackSlot());
     return ToOperand(op, kPointerSize);
   }
 
   Immediate ToImmediate(InstructionOperand* operand) {
     Constant constant = ToConstant(operand);
+    if (constant.type() == Constant::kInt32 &&
+        (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+         constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+      return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
+                       constant.rmode());
+    }
     switch (constant.type()) {
       case Constant::kInt32:
         return Immediate(constant.ToInt32());
@@ -369,11 +375,6 @@
   __ pop(ebp);
 }
 
-// For insert fninit/fld1 instructions after the Prologue
-thread_local bool is_block_0 = false;
-
-void CodeGenerator::AssembleSetupStackPointer() { is_block_0 = true; }
-
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -434,18 +435,12 @@
 }
 
 // Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+    Instruction* instr) {
   X87OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
 
-  // Workaround for CL #35139 (https://codereview.chromium.org/1775323002)
-  if (is_block_0) {
-    __ fninit();
-    __ fld1();
-    is_block_0 = false;
-  }
-
   switch (arch_opcode) {
     case kArchCallCodeObject: {
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -463,7 +458,7 @@
       }
       RecordCallPosition(instr);
       bool double_result =
-          instr->HasOutput() && instr->Output()->IsDoubleRegister();
+          instr->HasOutput() && instr->Output()->IsFPRegister();
       if (double_result) {
         __ lea(esp, Operand(esp, -kDoubleSize));
         __ fstp_d(Operand(esp, 0));
@@ -501,6 +496,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallAddress: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      CHECK(!HasImmediateInput(instr, 0));
+      Register reg = i.InputRegister(0);
+      __ jmp(reg);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
     case kArchCallJSFunction: {
       EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
@@ -516,7 +520,7 @@
       __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
       RecordCallPosition(instr);
       bool double_result =
-          instr->HasOutput() && instr->Output()->IsDoubleRegister();
+          instr->HasOutput() && instr->Output()->IsFPRegister();
       if (double_result) {
         __ lea(esp, Operand(esp, -kDoubleSize));
         __ fstp_d(Operand(esp, 0));
@@ -577,7 +581,7 @@
         __ CallCFunction(func, num_parameters);
       }
       bool double_result =
-          instr->HasOutput() && instr->Output()->IsDoubleRegister();
+          instr->HasOutput() && instr->Output()->IsFPRegister();
       if (double_result) {
         __ lea(esp, Operand(esp, -kDoubleSize));
         __ fstp_d(Operand(esp, 0));
@@ -612,7 +616,7 @@
       int double_register_param_count = 0;
       int x87_layout = 0;
       for (size_t i = 0; i < instr->InputCount(); i++) {
-        if (instr->InputAt(i)->IsDoubleRegister()) {
+        if (instr->InputAt(i)->IsFPRegister()) {
           double_register_param_count++;
         }
       }
@@ -630,7 +634,9 @@
 
       Deoptimizer::BailoutType bailout_type =
           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      if (result != kSuccess) return result;
       break;
     }
     case kArchRet:
@@ -650,11 +656,11 @@
       }
       break;
     case kArchTruncateDoubleToI: {
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fld_d(i.InputOperand(0));
       }
       __ TruncateX87TOSToI(i.OutputRegister());
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fstp(0);
       }
       break;
@@ -900,7 +906,7 @@
       uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
       uint32_t lower = static_cast<uint32_t>(src);
       uint32_t upper = static_cast<uint32_t>(src >> 32);
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ mov(MemOperand(esp, 0), Immediate(lower));
         __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -1092,10 +1098,10 @@
       // Set the correct round mode in x87 control register
       __ X87SetRC((mode << 10));
 
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         InstructionOperand* input = instr->InputAt(0);
         USE(input);
-        DCHECK(input->IsDoubleStackSlot());
+        DCHECK(input->IsFPStackSlot());
         if (FLAG_debug_code && FLAG_enable_slow_asserts) {
           __ VerifyX87StackDepth(1);
         }
@@ -1333,13 +1339,13 @@
     }
     case kX87Float32ToFloat64: {
       InstructionOperand* input = instr->InputAt(0);
-      if (input->IsDoubleRegister()) {
+      if (input->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ fstp_s(MemOperand(esp, 0));
         __ fld_s(MemOperand(esp, 0));
         __ add(esp, Immediate(kDoubleSize));
       } else {
-        DCHECK(input->IsDoubleStackSlot());
+        DCHECK(input->IsFPStackSlot());
         if (FLAG_debug_code && FLAG_enable_slow_asserts) {
           __ VerifyX87StackDepth(1);
         }
@@ -1357,17 +1363,17 @@
       break;
     }
     case kX87Float32ToInt32: {
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fld_s(i.InputOperand(0));
       }
       __ TruncateX87TOSToI(i.OutputRegister(0));
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fstp(0);
       }
       break;
     }
     case kX87Float32ToUint32: {
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fld_s(i.InputOperand(0));
       }
       Label success;
@@ -1381,30 +1387,30 @@
       __ TruncateX87TOSToI(i.OutputRegister(0));
       __ or_(i.OutputRegister(0), Immediate(0x80000000));
       __ bind(&success);
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fstp(0);
       }
       break;
     }
     case kX87Float64ToInt32: {
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fld_d(i.InputOperand(0));
       }
       __ TruncateX87TOSToI(i.OutputRegister(0));
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fstp(0);
       }
       break;
     }
     case kX87Float64ToFloat32: {
       InstructionOperand* input = instr->InputAt(0);
-      if (input->IsDoubleRegister()) {
+      if (input->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ fstp_s(MemOperand(esp, 0));
         __ fld_s(MemOperand(esp, 0));
         __ add(esp, Immediate(kDoubleSize));
       } else {
-        DCHECK(input->IsDoubleStackSlot());
+        DCHECK(input->IsFPStackSlot());
         if (FLAG_debug_code && FLAG_enable_slow_asserts) {
           __ VerifyX87StackDepth(1);
         }
@@ -1419,7 +1425,7 @@
     }
     case kX87Float64ToUint32: {
       __ push_imm32(-2147483648);
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fld_d(i.InputOperand(0));
       }
       __ fild_s(Operand(esp, 0));
@@ -1429,13 +1435,13 @@
       __ add(esp, Immediate(kInt32Size));
       __ add(i.OutputRegister(), Immediate(0x80000000));
       __ fstp(0);
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         __ fstp(0);
       }
       break;
     }
     case kX87Float64ExtractHighWord32: {
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ fst_d(MemOperand(esp, 0));
         __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
@@ -1443,13 +1449,13 @@
       } else {
         InstructionOperand* input = instr->InputAt(0);
         USE(input);
-        DCHECK(input->IsDoubleStackSlot());
+        DCHECK(input->IsFPStackSlot());
         __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
       }
       break;
     }
     case kX87Float64ExtractLowWord32: {
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ fst_d(MemOperand(esp, 0));
         __ mov(i.OutputRegister(), MemOperand(esp, 0));
@@ -1457,7 +1463,7 @@
       } else {
         InstructionOperand* input = instr->InputAt(0);
         USE(input);
-        DCHECK(input->IsDoubleStackSlot());
+        DCHECK(input->IsFPStackSlot());
         __ mov(i.OutputRegister(), i.InputOperand(0));
       }
       break;
@@ -1496,10 +1502,10 @@
       // Set the correct round mode in x87 control register
       __ X87SetRC((mode << 10));
 
-      if (!instr->InputAt(0)->IsDoubleRegister()) {
+      if (!instr->InputAt(0)->IsFPRegister()) {
         InstructionOperand* input = instr->InputAt(0);
         USE(input);
-        DCHECK(input->IsDoubleStackSlot());
+        DCHECK(input->IsFPStackSlot());
         if (FLAG_debug_code && FLAG_enable_slow_asserts) {
           __ VerifyX87StackDepth(1);
         }
@@ -1652,7 +1658,7 @@
       break;
     }
     case kX87Push:
-      if (instr->InputAt(0)->IsDoubleRegister()) {
+      if (instr->InputAt(0)->IsFPRegister()) {
         auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
         if (allocated.representation() == MachineRepresentation::kFloat32) {
           __ sub(esp, Immediate(kDoubleSize));
@@ -1663,7 +1669,7 @@
           __ fst_d(Operand(esp, 0));
         }
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
-      } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      } else if (instr->InputAt(0)->IsFPStackSlot()) {
         auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
         if (allocated.representation() == MachineRepresentation::kFloat32) {
           __ sub(esp, Immediate(kDoubleSize));
@@ -1693,12 +1699,30 @@
       }
       break;
     }
+    case kX87Xchgb: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchg_b(i.InputRegister(index), operand);
+      break;
+    }
+    case kX87Xchgw: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchg_w(i.InputRegister(index), operand);
+      break;
+    }
+    case kX87Xchgl: {
+      size_t index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      __ xchg(i.InputRegister(index), operand);
+      break;
+    }
     case kX87PushFloat32:
       __ lea(esp, Operand(esp, -kFloatSize));
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ fld_s(i.InputOperand(0));
         __ fstp_s(MemOperand(esp, 0));
-      } else if (instr->InputAt(0)->IsDoubleRegister()) {
+      } else if (instr->InputAt(0)->IsFPRegister()) {
         __ fst_s(MemOperand(esp, 0));
       } else {
         UNREACHABLE();
@@ -1706,10 +1730,10 @@
       break;
     case kX87PushFloat64:
       __ lea(esp, Operand(esp, -kDoubleSize));
-      if (instr->InputAt(0)->IsDoubleStackSlot()) {
+      if (instr->InputAt(0)->IsFPStackSlot()) {
         __ fld_d(i.InputOperand(0));
         __ fstp_d(MemOperand(esp, 0));
-      } else if (instr->InputAt(0)->IsDoubleRegister()) {
+      } else if (instr->InputAt(0)->IsFPRegister()) {
         __ fst_d(MemOperand(esp, 0));
       } else {
         UNREACHABLE();
@@ -1761,7 +1785,18 @@
     case kCheckedStoreWord64:
       UNREACHABLE();  // currently unsupported checked int64 load/store.
       break;
+    case kAtomicLoadInt8:
+    case kAtomicLoadUint8:
+    case kAtomicLoadInt16:
+    case kAtomicLoadUint16:
+    case kAtomicLoadWord32:
+    case kAtomicStoreWord8:
+    case kAtomicStoreWord16:
+    case kAtomicStoreWord32:
+      UNREACHABLE();  // Won't be generated by instruction selector.
+      break;
   }
+  return kSuccess;
 }  // NOLINT(readability/fn_size)
 
 
@@ -1837,7 +1872,7 @@
     int double_register_param_count = 0;
     int x87_layout = 0;
     for (size_t i = 0; i < instr->InputCount(); i++) {
-      if (instr->InputAt(i)->IsDoubleRegister()) {
+      if (instr->InputAt(i)->IsFPRegister()) {
         double_register_param_count++;
       }
     }
@@ -1971,12 +2006,13 @@
   __ jmp(Operand::JumpTable(input, times_4, table));
 }
 
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  return kSuccess;
 }
 
 
@@ -2107,8 +2143,25 @@
 //                                            | RET | args |  caller frame |
 //                                            ^ esp                        ^ ebp
 
+void CodeGenerator::FinishFrame(Frame* frame) {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {  // Save callee-saved registers.
+    DCHECK(!info()->is_osr());
+    int pushed = 0;
+    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+      if (!((1 << i) & saves)) continue;
+      ++pushed;
+    }
+    frame->AllocateSavedCalleeRegisterSlots(pushed);
+  }
 
-void CodeGenerator::AssemblePrologue() {
+  // Initialize FPU state.
+  __ fninit();
+  __ fld1();
+}
+
+void CodeGenerator::AssembleConstructFrame() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (descriptor->IsCFunctionCall()) {
@@ -2120,7 +2173,9 @@
       __ StubPrologue(info()->GetOutputStackFrameType());
     }
   }
-  int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+  int shrink_slots = frame()->GetSpillSlotCount();
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2131,7 +2186,7 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
 
     // Initialize FPU state.
     __ fninit();
@@ -2139,8 +2194,8 @@
   }
 
   const RegList saves = descriptor->CalleeSavedRegisters();
-  if (stack_shrink_slots > 0) {
-    __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+  if (shrink_slots > 0) {
+    __ sub(esp, Immediate(shrink_slots * kPointerSize));
   }
 
   if (saves != 0) {  // Save callee-saved registers.
@@ -2151,7 +2206,6 @@
       __ push(Register::from_code(i));
       ++pushed;
     }
-    frame()->AllocateSavedCalleeRegisterSlots(pushed);
   }
 }
 
@@ -2263,7 +2317,7 @@
     } else if (src_constant.type() == Constant::kFloat32) {
       // TODO(turbofan): Can we do better here?
       uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         __ sub(esp, Immediate(kInt32Size));
         __ mov(MemOperand(esp, 0), Immediate(src));
         // Always push only one value onto the x87 stack.
@@ -2271,7 +2325,7 @@
         __ fld_s(MemOperand(esp, 0));
         __ add(esp, Immediate(kInt32Size));
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         Operand dst = g.ToOperand(destination);
         __ Move(dst, Immediate(src));
       }
@@ -2280,7 +2334,7 @@
       uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
       uint32_t lower = static_cast<uint32_t>(src);
       uint32_t upper = static_cast<uint32_t>(src >> 32);
-      if (destination->IsDoubleRegister()) {
+      if (destination->IsFPRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
         __ mov(MemOperand(esp, 0), Immediate(lower));
         __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -2289,15 +2343,15 @@
         __ fld_d(MemOperand(esp, 0));
         __ add(esp, Immediate(kDoubleSize));
       } else {
-        DCHECK(destination->IsDoubleStackSlot());
+        DCHECK(destination->IsFPStackSlot());
         Operand dst0 = g.ToOperand(destination);
         Operand dst1 = g.HighOperand(destination);
         __ Move(dst0, Immediate(lower));
         __ Move(dst1, Immediate(upper));
       }
     }
-  } else if (source->IsDoubleRegister()) {
-    DCHECK(destination->IsDoubleStackSlot());
+  } else if (source->IsFPRegister()) {
+    DCHECK(destination->IsFPStackSlot());
     Operand dst = g.ToOperand(destination);
     auto allocated = AllocatedOperand::cast(*source);
     switch (allocated.representation()) {
@@ -2310,11 +2364,11 @@
       default:
         UNREACHABLE();
     }
-  } else if (source->IsDoubleStackSlot()) {
-    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+  } else if (source->IsFPStackSlot()) {
+    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     Operand src = g.ToOperand(source);
     auto allocated = AllocatedOperand::cast(*source);
-    if (destination->IsDoubleRegister()) {
+    if (destination->IsFPRegister()) {
       // Always push only one value onto the x87 stack.
       __ fstp(0);
       switch (allocated.representation()) {
@@ -2373,9 +2427,9 @@
     frame_access_state()->IncreaseSPDelta(-1);
     Operand src2 = g.ToOperand(source);
     __ pop(src2);
-  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+  } else if (source->IsFPRegister() && destination->IsFPRegister()) {
     UNREACHABLE();
-  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+  } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
     auto allocated = AllocatedOperand::cast(*source);
     switch (allocated.representation()) {
       case MachineRepresentation::kFloat32:
@@ -2391,7 +2445,7 @@
       default:
         UNREACHABLE();
     }
-  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+  } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
     auto allocated = AllocatedOperand::cast(*source);
     switch (allocated.representation()) {
       case MachineRepresentation::kFloat32:
@@ -2423,9 +2477,6 @@
 }
 
 
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
     return;
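
Note on the signature changes above: AssembleDeoptimizerCall (and the arch-instruction assembler that now ends with `return kSuccess;`) report a CodeGenerator::CodeGenResult instead of returning void, so code generation can bail out cleanly when the deoptimization-entry table is exhausted rather than crashing on a null entry. A minimal sketch of the pattern, with a toy fixed-size entry table standing in for V8's real Deoptimizer tables:

// Hedged sketch of the CodeGenResult error-propagation pattern; the enum
// values mirror this patch, the entry table below is a hypothetical stand-in.
enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

static const int kMaxDeoptEntries = 4;
static void* deopt_entries[kMaxDeoptEntries];  // filled lazily elsewhere

static void* GetDeoptimizationEntry(int id) {
  // nullptr signals that the preallocated entry table has run out.
  return (id >= 0 && id < kMaxDeoptEntries) ? deopt_entries[id] : nullptr;
}

CodeGenResult AssembleDeoptimizerCall(int deoptimization_id) {
  void* entry = GetDeoptimizationEntry(deoptimization_id);
  if (entry == nullptr) return kTooManyDeoptimizationBailouts;
  (void)entry;  // the real backend emits `call entry` here
  return kSuccess;
}

Callers check the result instead of assuming success, which is why the big arch-instruction switch above now ends in `return kSuccess;`.
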
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index d70a737..0cf9f35 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -96,7 +96,10 @@
   V(X87PushFloat64)                \
   V(X87PushFloat32)                \
   V(X87Poke)                       \
-  V(X87StackCheck)
+  V(X87StackCheck)                 \
+  V(X87Xchgb)                      \
+  V(X87Xchgw)                      \
+  V(X87Xchgl)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index e4d085e..a99e7a6 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -27,11 +27,15 @@
     return DefineAsRegister(node);
   }
 
-  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+                          int effect_level) {
     if (input->opcode() != IrOpcode::kLoad ||
         !selector()->CanCover(node, input)) {
       return false;
     }
+    if (effect_level != selector()->GetEffectLevel(input)) {
+      return false;
+    }
     MachineRepresentation rep =
         LoadRepresentationOf(input->op()).representation();
     switch (opcode) {
@@ -60,13 +64,20 @@
       case IrOpcode::kInt32Constant:
       case IrOpcode::kNumberConstant:
       case IrOpcode::kExternalConstant:
+      case IrOpcode::kRelocatableInt32Constant:
+      case IrOpcode::kRelocatableInt64Constant:
         return true;
       case IrOpcode::kHeapConstant: {
+// TODO(bmeurer): We must not dereference handles concurrently. If we
+// really have to do this here, then we need to find a way to put this
+// information on the HeapConstant node already.
+#if 0
         // Constants in new space cannot be used as immediates in V8 because
         // the GC does not scan code objects when collecting the new generation.
         Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
         Isolate* isolate = value->GetIsolate();
         return !isolate->heap()->InNewSpace(*value);
+#endif
       }
       default:
         return false;
@@ -842,21 +853,15 @@
        g.Use(node->InputAt(0)));
 }
 
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
   X87OperandGenerator g(this);
+  Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+       g.Use(node->InputAt(0)));
+}
 
-  switch (TruncationModeOf(node->op())) {
-    case TruncationMode::kJavaScript:
-      Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
-           g.Use(node->InputAt(0)));
-      return;
-    case TruncationMode::kRoundToZero:
-      Emit(kX87Float64ToInt32, g.DefineAsRegister(node),
-           g.Use(node->InputAt(0)));
-      return;
-  }
-  UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
@@ -896,6 +901,12 @@
   Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+  Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+  Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   X87OperandGenerator g(this);
@@ -904,6 +915,13 @@
   Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
 }
 
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+  Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+  Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   X87OperandGenerator g(this);
@@ -1254,18 +1272,24 @@
 
   InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
 
+  int effect_level = selector->GetEffectLevel(node);
+  if (cont->IsBranch()) {
+    effect_level = selector->GetEffectLevel(
+        cont->true_block()->PredecessorAt(0)->control_input());
+  }
+
   // If one of the two inputs is an immediate, make sure it's on the right, or
   // if one of the two inputs is a memory operand, make sure it's on the left.
   if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
-      (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
-       !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
+      (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
+       !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (g.CanBeMemoryOperand(opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
       // TODO(epertoso): we should use `narrowed_opcode' here once we match
       // immediates too.
       return VisitCompareWithMemoryOperand(selector, opcode, left,
@@ -1276,7 +1300,7 @@
   }
 
   // Match memory operands on left side of comparison.
-  if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+  if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
     bool needs_byte_register =
         narrowed_opcode == kX87Test8 || narrowed_opcode == kX87Cmp8;
     return VisitCompareWithMemoryOperand(
@@ -1588,6 +1612,52 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+         load_rep.representation() == MachineRepresentation::kWord16 ||
+         load_rep.representation() == MachineRepresentation::kWord32);
+  USE(load_rep);
+  VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+  X87OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kX87Xchgb;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kX87Xchgw;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kX87Xchgl;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  AddressingMode addressing_mode;
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  inputs[input_count++] = g.UseUniqueRegister(base);
+  if (g.CanBeImmediate(index)) {
+    inputs[input_count++] = g.UseImmediate(index);
+    addressing_mode = kMode_MRI;
+  } else {
+    inputs[input_count++] = g.UseUniqueRegister(index);
+    addressing_mode = kMode_MR1;
+  }
+  inputs[input_count++] = g.UseUniqueRegister(value);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  Emit(code, 0, nullptr, input_count, inputs);
+}
 
 // static
 MachineOperatorBuilder::Flags
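
On IA-32, VisitAtomicLoad above can simply delegate to VisitLoad because aligned word-sized reads are already atomic under x86's memory model, while atomic stores are lowered to the new X87Xchgb/Xchgw/Xchgl opcodes: xchg with a memory operand carries an implicit lock prefix, making the store both atomic and sequentially consistent without an explicit fence. The value presumably needs UseUniqueRegister because xchg also writes the old memory contents back into that register. A hedged GCC/Clang inline-asm sketch of the same idea:

#include <stdint.h>

// Sketch only: shows why a locked exchange suffices for a seq-cst store.
static inline void AtomicStoreWord32(volatile uint32_t* addr, uint32_t value) {
  // xchg between a register and memory is implicitly locked on x86.
  __asm__ __volatile__("xchgl %0, %1"
                       : "+r"(value), "+m"(*addr)
                       :
                       : "memory");
}

// Atomic loads need no special instruction: an aligned 32-bit mov is
// already atomic on x86, which is why VisitAtomicLoad defers to VisitLoad.
static inline uint32_t AtomicLoadWord32(const volatile uint32_t* addr) {
  return *addr;  // compiles to a plain aligned mov
}
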
diff --git a/src/context-measure.cc b/src/context-measure.cc
index 3423629..00c11ee 100644
--- a/src/context-measure.cc
+++ b/src/context-measure.cc
@@ -18,7 +18,7 @@
       count_(0),
       size_(0) {
   DCHECK(context_->IsNativeContext());
-  Object* next_link = context_->get(Context::NEXT_CONTEXT_LINK);
+  Object* next_link = context_->next_context_link();
   MeasureObject(context_);
   MeasureDeferredObjects();
   context_->set(Context::NEXT_CONTEXT_LINK, next_link);
@@ -37,10 +37,10 @@
 
 
 void ContextMeasure::MeasureObject(HeapObject* object) {
-  if (back_reference_map_.Lookup(object).is_valid()) return;
+  if (reference_map_.Lookup(object).is_valid()) return;
   if (root_index_map_.Lookup(object) != RootIndexMap::kInvalidRootIndex) return;
   if (IsShared(object)) return;
-  back_reference_map_.Add(object, BackReference::DummyReference());
+  reference_map_.Add(object, SerializerReference::DummyReference());
   recursion_depth_++;
   if (recursion_depth_ > kMaxRecursion) {
     deferred_objects_.Add(object);
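
The MeasureObject logic above bounds C++ stack depth: once the recursion depth passes kMaxRecursion, the object is parked on a deferred worklist and its contents are measured later instead of recursing further. A hedged sketch of that traversal pattern, where Node and its visited flag are toy stand-ins for HeapObject and the reference map:

#include <vector>

struct Node { std::vector<Node*> children; bool visited = false; };

static const int kMaxRecursion = 16;
static std::vector<Node*> deferred_objects;

void Measure(Node* node, int depth) {
  if (node->visited) return;  // plays the role of reference_map_.Lookup()
  node->visited = true;
  // ... account for the node's size here ...
  if (depth > kMaxRecursion) {
    deferred_objects.push_back(node);  // visit its children later instead
    return;
  }
  for (Node* child : node->children) Measure(child, depth + 1);
}

void MeasureDeferred() {
  while (!deferred_objects.empty()) {
    Node* node = deferred_objects.back();
    deferred_objects.pop_back();
    // Restart the depth budget for the deferred subtree.
    for (Node* child : node->children) Measure(child, 0);
  }
}
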
diff --git a/src/context-measure.h b/src/context-measure.h
index 665c547..7e94f2c 100644
--- a/src/context-measure.h
+++ b/src/context-measure.h
@@ -29,7 +29,7 @@
 
   Context* context_;
 
-  BackReferenceMap back_reference_map_;
+  SerializerReferenceMap reference_map_;
   RootIndexMap root_index_map_;
 
   static const int kMaxRecursion = 16;
diff --git a/src/contexts-inl.h b/src/contexts-inl.h
index 344d5db..5d62a04 100644
--- a/src/contexts-inl.h
+++ b/src/contexts-inl.h
@@ -56,6 +56,7 @@
 }
 void Context::set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
 
+Object* Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
 
 bool Context::has_extension() { return !extension()->IsTheHole(); }
 HeapObject* Context::extension() {
diff --git a/src/contexts.cc b/src/contexts.cc
index 67a9fea..392a3cc 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -184,29 +184,24 @@
   switch (mode) {
     case VAR:
       *attributes = NONE;
-      *binding_flags = MUTABLE_IS_INITIALIZED;
+      *binding_flags = BINDING_IS_INITIALIZED;
       break;
     case LET:
       *attributes = NONE;
       *binding_flags = (init_flag == kNeedsInitialization)
-                           ? MUTABLE_CHECK_INITIALIZED
-                           : MUTABLE_IS_INITIALIZED;
+                           ? BINDING_CHECK_INITIALIZED
+                           : BINDING_IS_INITIALIZED;
       break;
     case CONST_LEGACY:
+      DCHECK_EQ(kCreatedInitialized, init_flag);
       *attributes = READ_ONLY;
-      *binding_flags = (init_flag == kNeedsInitialization)
-                           ? IMMUTABLE_CHECK_INITIALIZED
-                           : IMMUTABLE_IS_INITIALIZED;
+      *binding_flags = BINDING_IS_INITIALIZED;
       break;
     case CONST:
       *attributes = READ_ONLY;
       *binding_flags = (init_flag == kNeedsInitialization)
-                           ? IMMUTABLE_CHECK_INITIALIZED_HARMONY
-                           : IMMUTABLE_IS_INITIALIZED_HARMONY;
-      break;
-    case IMPORT:
-      // TODO(ES6)
-      UNREACHABLE();
+                           ? BINDING_CHECK_INITIALIZED
+                           : BINDING_IS_INITIALIZED;
       break;
     case DYNAMIC:
     case DYNAMIC_GLOBAL:
@@ -362,8 +357,7 @@
           *index = function_index;
           *attributes = READ_ONLY;
           DCHECK(mode == CONST_LEGACY || mode == CONST);
-          *binding_flags = (mode == CONST_LEGACY)
-              ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
+          *binding_flags = BINDING_IS_INITIALIZED;
           return context;
         }
       }
@@ -376,7 +370,7 @@
         }
         *index = Context::THROWN_OBJECT_INDEX;
         *attributes = NONE;
-        *binding_flags = MUTABLE_IS_INITIALIZED;
+        *binding_flags = BINDING_IS_INITIALIZED;
         return context;
       }
     } else if (context->IsDebugEvaluateContext()) {
@@ -466,7 +460,7 @@
       found = true;
       break;
     }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+    context = Context::cast(context)->next_context_link();
   }
   CHECK(found);
 #endif
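
The contexts.cc changes above collapse the old six-value binding-flag lattice: CONST_LEGACY is now always created initialized (per the new DCHECK_EQ), and let/const share the same hole-check semantics, so the only distinction left is whether a read needs an initialization check. A hedged sketch of how a caller might consume the collapsed flags; Value, IsTheHole and ThrowReferenceError are toy stand-ins for the V8 internals:

enum BindingFlags {
  BINDING_IS_INITIALIZED,
  BINDING_CHECK_INITIALIZED,
  MISSING_BINDING
};

struct Value { bool is_hole = false; };
static bool IsTheHole(const Value& v) { return v.is_hole; }
static void ThrowReferenceError() { /* raise the TDZ error */ }

bool LoadBinding(BindingFlags flags, const Value& slot, Value* out) {
  switch (flags) {
    case MISSING_BINDING:
      return false;  // caller falls back to a dynamic lookup
    case BINDING_CHECK_INITIALIZED:
      // let/const read before initialization: the slot still holds the
      // hole, and the read must throw a ReferenceError (TDZ semantics).
      if (IsTheHole(slot)) {
        ThrowReferenceError();
        return false;
      }
      // Fall through: after the check the binding is initialized.
    case BINDING_IS_INITIALIZED:
      *out = slot;
      return true;
  }
  return false;  // unreachable with a well-formed flag
}
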
diff --git a/src/contexts.h b/src/contexts.h
index 90fb9a4..1161885 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -30,44 +30,34 @@
 // their state is changed by the InitializeImmutableBinding method. The
 // BindingFlags enum indicates whether a binding has definitely been
 // initialized. A mutable binding does not need to be checked and thus has
-// the BindingFlag MUTABLE_IS_INITIALIZED.
+// the BindingFlag BINDING_IS_INITIALIZED.
 //
-// There are two possibilities for immutable bindings
-//  * 'const' declared variables. They are initialized when evaluating the
-//    corresponding declaration statement. They need to be checked for being
-//    initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
+// There is one possibility for legacy immutable bindings:
 //  * The function name of a named function literal. The binding is immediately
 //    initialized when entering the function and thus does not need to be
-//    checked. it gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
-// Accessing an uninitialized binding produces the undefined value.
+//    checked. It gets the BindingFlag BINDING_IS_INITIALIZED.
 //
 // The harmony proposal for block scoped bindings also introduces the
 // uninitialized state for mutable bindings.
 //  * A 'let' declared variable. They are initialized when evaluating the
 //    corresponding declaration statement. They need to be checked for being
-//    initialized and thus get the flag MUTABLE_CHECK_INITIALIZED.
+//    initialized and thus get the flag BINDING_CHECK_INITIALIZED.
 //  * A 'var' declared variable. It is initialized immediately upon creation
 //    and thus doesn't need to be checked. It gets the flag
-//    MUTABLE_IS_INITIALIZED.
+//    BINDING_IS_INITIALIZED.
 //  * Catch bound variables, function parameters and variables introduced by
 //    function declarations are initialized immediately and do not need to be
-//    checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
-// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
-// an uninitialized binding produces a reference error.
+//    checked. Thus they get the flag BINDING_IS_INITIALIZED.
+// Accessing an uninitialized binding produces a reference error.
 //
 // In V8 uninitialized bindings are set to the hole value upon creation and set
 // to a different value upon initialization.
 enum BindingFlags {
-  MUTABLE_IS_INITIALIZED,
-  MUTABLE_CHECK_INITIALIZED,
-  IMMUTABLE_IS_INITIALIZED,
-  IMMUTABLE_CHECK_INITIALIZED,
-  IMMUTABLE_IS_INITIALIZED_HARMONY,
-  IMMUTABLE_CHECK_INITIALIZED_HARMONY,
+  BINDING_IS_INITIALIZED,
+  BINDING_CHECK_INITIALIZED,
   MISSING_BINDING
 };
 
-
 // Heap-allocated activation contexts.
 //
 // Contexts are implemented as FixedArray objects; the Context
@@ -77,79 +67,76 @@
 // must always be allocated via Heap::AllocateContext() or
 // Factory::NewContext.
 
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                             \
-  V(IS_ARRAYLIKE, JSFunction, is_arraylike)                               \
-  V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site)     \
-  V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error)                 \
-  V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error)                   \
-  V(OBJECT_FREEZE, JSFunction, object_freeze)                             \
-  V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible)               \
-  V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen)                       \
-  V(OBJECT_IS_SEALED, JSFunction, object_is_sealed)                       \
-  V(OBJECT_KEYS, JSFunction, object_keys)                                 \
-  V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply)                       \
-  V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct)               \
-  V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property)   \
-  V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property)   \
-  V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments)                 \
-  V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)                   \
-  V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance)       \
-  V(MATH_FLOOR, JSFunction, math_floor)                                   \
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                           \
+  V(IS_ARRAYLIKE, JSFunction, is_arraylike)                             \
+  V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site)   \
+  V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error)               \
+  V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error)                 \
+  V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties)     \
+  V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property)         \
+  V(OBJECT_FREEZE, JSFunction, object_freeze)                           \
+  V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of)       \
+  V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible)             \
+  V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen)                     \
+  V(OBJECT_IS_SEALED, JSFunction, object_is_sealed)                     \
+  V(OBJECT_KEYS, JSFunction, object_keys)                               \
+  V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply)                     \
+  V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct)             \
+  V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+  V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
+  V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments)               \
+  V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)                 \
+  V(MATH_FLOOR, JSFunction, math_floor)                                 \
   V(MATH_SQRT, JSFunction, math_sqrt)
 
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                     \
-  V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                             \
-  V(ARRAY_POP_INDEX, JSFunction, array_pop)                                   \
-  V(ARRAY_PUSH_INDEX, JSFunction, array_push)                                 \
-  V(ARRAY_SHIFT_INDEX, JSFunction, array_shift)                               \
-  V(ARRAY_SPLICE_INDEX, JSFunction, array_splice)                             \
-  V(ARRAY_SLICE_INDEX, JSFunction, array_slice)                               \
-  V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift)                           \
-  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)           \
-  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)                     \
-  V(ERROR_FUNCTION_INDEX, JSFunction, error_function)                         \
-  V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function)               \
-  V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun)         \
-  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                       \
-  V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter)         \
-  V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function)               \
-  V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete)                          \
-  V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                                \
-  V(MAP_HAS_METHOD_INDEX, JSFunction, map_has)                                \
-  V(MAP_SET_METHOD_INDEX, JSFunction, map_set)                                \
-  V(MATH_POW_METHOD_INDEX, JSFunction, math_pow)                              \
-  V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number)   \
-  V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number)       \
-  V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line)       \
-  V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, native_object_get_notifier) \
-  V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction,                        \
-    native_object_notifier_perform_change)                                    \
-  V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe)           \
-  V(NO_SIDE_EFFECTS_TO_STRING_FUN_INDEX, JSFunction,                          \
-    no_side_effects_to_string_fun)                                            \
-  V(OBJECT_VALUE_OF, JSFunction, object_value_of)                             \
-  V(OBJECT_TO_STRING, JSFunction, object_to_string)                           \
-  V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \
-  V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice)     \
-  V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice)     \
-  V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change)       \
-  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                           \
-  V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain)                           \
-  V(PROMISE_CREATE_INDEX, JSFunction, promise_create)                         \
-  V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function)                     \
-  V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction,                \
-    promise_has_user_defined_reject_handler)                                  \
-  V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                         \
-  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                       \
-  V(PROMISE_THEN_INDEX, JSFunction, promise_then)                             \
-  V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)             \
-  V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function)     \
-  V(SET_ADD_METHOD_INDEX, JSFunction, set_add)                                \
-  V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete)                          \
-  V(SET_HAS_METHOD_INDEX, JSFunction, set_has)                                \
-  V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate)   \
-  V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)           \
-  V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)               \
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                   \
+  V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                           \
+  V(ARRAY_POP_INDEX, JSFunction, array_pop)                                 \
+  V(ARRAY_PUSH_INDEX, JSFunction, array_push)                               \
+  V(ARRAY_SHIFT_INDEX, JSFunction, array_shift)                             \
+  V(ARRAY_SPLICE_INDEX, JSFunction, array_splice)                           \
+  V(ARRAY_SLICE_INDEX, JSFunction, array_slice)                             \
+  V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift)                         \
+  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)         \
+  V(ASYNC_FUNCTION_AWAIT_INDEX, JSFunction, async_function_await)           \
+  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)                   \
+  V(ERROR_FUNCTION_INDEX, JSFunction, error_function)                       \
+  V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function)             \
+  V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun)       \
+  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                     \
+  V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter)       \
+  V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function)             \
+  V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete)                        \
+  V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                              \
+  V(MAP_HAS_METHOD_INDEX, JSFunction, map_has)                              \
+  V(MAP_SET_METHOD_INDEX, JSFunction, map_set)                              \
+  V(MATH_POW_METHOD_INDEX, JSFunction, math_pow)                            \
+  V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number) \
+  V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number)     \
+  V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line)     \
+  V(NO_SIDE_EFFECTS_TO_STRING_FUN_INDEX, JSFunction,                        \
+    no_side_effects_to_string_fun)                                          \
+  V(OBJECT_VALUE_OF, JSFunction, object_value_of)                           \
+  V(OBJECT_TO_STRING, JSFunction, object_to_string)                         \
+  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                         \
+  V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain)                         \
+  V(PROMISE_CREATE_INDEX, JSFunction, promise_create)                       \
+  V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function)                   \
+  V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction,              \
+    promise_has_user_defined_reject_handler)                                \
+  V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                       \
+  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                     \
+  V(PROMISE_CREATE_RESOLVED_INDEX, JSFunction, promise_create_resolved)     \
+  V(PROMISE_CREATE_REJECTED_INDEX, JSFunction, promise_create_rejected)     \
+  V(PROMISE_THEN_INDEX, JSFunction, promise_then)                           \
+  V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)           \
+  V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function)   \
+  V(SET_ADD_METHOD_INDEX, JSFunction, set_add)                              \
+  V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete)                        \
+  V(SET_HAS_METHOD_INDEX, JSFunction, set_has)                              \
+  V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
+  V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)         \
+  V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)             \
   V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
 
 #define NATIVE_CONTEXT_FIELDS(V)                                               \
@@ -162,6 +149,7 @@
   V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun)                      \
   V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map)                             \
   V(ARRAY_FUNCTION_INDEX, JSFunction, array_function)                          \
+  V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor)     \
   V(BOOL16X8_FUNCTION_INDEX, JSFunction, bool16x8_function)                    \
   V(BOOL32X4_FUNCTION_INDEX, JSFunction, bool32x4_function)                    \
   V(BOOL8X16_FUNCTION_INDEX, JSFunction, bool8x16_function)                    \
@@ -193,6 +181,7 @@
     generator_function_function)                                               \
   V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
   V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype)          \
+  V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype)  \
   V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype)        \
   V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun)                        \
   V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function)                      \
@@ -249,8 +238,10 @@
   V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map,                    \
     sloppy_function_with_readonly_prototype_map)                               \
   V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map)                           \
+  V(SLOPPY_ASYNC_FUNCTION_MAP_INDEX, Map, sloppy_async_function_map)           \
   V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map)   \
   V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map)         \
+  V(STRICT_ASYNC_FUNCTION_MAP_INDEX, Map, strict_async_function_map)           \
   V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map)                     \
   V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map)                       \
   V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map,                          \
@@ -421,6 +412,8 @@
   inline Context* previous();
   inline void set_previous(Context* context);
 
+  inline Object* next_context_link();
+
   inline bool has_extension();
   inline HeapObject* extension();
   inline void set_extension(HeapObject* object);
@@ -531,6 +524,11 @@
                                       : SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
     }
 
+    if (IsAsyncFunction(kind)) {
+      return is_strict(language_mode) ? STRICT_ASYNC_FUNCTION_MAP_INDEX
+                                      : SLOPPY_ASYNC_FUNCTION_MAP_INDEX;
+    }
+
     if (IsClassConstructor(kind)) {
       // Use strict function map (no own "caller" / "arguments")
       return STRICT_FUNCTION_MAP_INDEX;
diff --git a/src/counters-inl.h b/src/counters-inl.h
new file mode 100644
index 0000000..c8c06d2
--- /dev/null
+++ b/src/counters-inl.h
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COUNTERS_INL_H_
+#define V8_COUNTERS_INL_H_
+
+#include "src/counters.h"
+
+namespace v8 {
+namespace internal {
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(
+    HeapObject* heap_object, RuntimeCallStats::CounterId counter_id) {
+  if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
+    isolate_ = heap_object->GetIsolate();
+    RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COUNTERS_INL_H_
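
The new HeapObject-based constructor above defers the GetIsolate() call until stats are actually enabled, keeping the disabled path to a single flag check. A hypothetical call site, assuming the counters this patch defines via FOR_EACH_MANUAL_COUNTER in the counters.h hunks further below (SlowSetPrototype itself is made up for illustration):

void SlowSetPrototype(HeapObject* map) {
  // With --runtime-call-stats off, this constructor is just a branch on a
  // flag; map->GetIsolate() is only reached when stats are enabled.
  RuntimeCallTimerScope stats_scope(map, &RuntimeCallStats::Map_SetPrototype);
  // ... perform the prototype transition; the elapsed time is attributed
  // to the Map_SetPrototype counter when the scope is destroyed ...
}
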
diff --git a/src/counters.cc b/src/counters.cc
index 4f5c251..0dd62a0 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -272,30 +272,38 @@
   time = base::TimeDelta();
 }
 
-void RuntimeCallStats::Enter(RuntimeCallCounter* counter) {
-  RuntimeCallTimer* timer = new RuntimeCallTimer();
-  timer->Initialize(counter, current_timer_);
-  Enter(timer);
+// static
+void RuntimeCallStats::Enter(Isolate* isolate, RuntimeCallTimer* timer,
+                             CounterId counter_id) {
+  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+  RuntimeCallCounter* counter = &(stats->*counter_id);
+  timer->Start(counter, stats->current_timer_);
+  stats->current_timer_ = timer;
 }
 
-void RuntimeCallStats::Enter(RuntimeCallTimer* timer_) {
-  current_timer_ = timer_;
-  current_timer_->Start();
+// static
+void RuntimeCallStats::Leave(Isolate* isolate, RuntimeCallTimer* timer) {
+  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+  DCHECK_EQ(stats->current_timer_, timer);
+  stats->current_timer_ = timer->Stop();
 }
 
-void RuntimeCallStats::Leave() {
-  RuntimeCallTimer* timer = current_timer_;
-  Leave(timer);
-  delete timer;
-}
-
-void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
-  current_timer_ = timer->Stop();
+// static
+void RuntimeCallStats::CorrectCurrentCounterId(Isolate* isolate,
+                                               CounterId counter_id) {
+  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+  DCHECK_NOT_NULL(stats->current_timer_);
+  RuntimeCallCounter* counter = &(stats->*counter_id);
+  stats->current_timer_->counter_ = counter;
 }
 
 void RuntimeCallStats::Print(std::ostream& os) {
   RuntimeCallStatEntries entries;
 
+#define PRINT_COUNTER(name) entries.Add(&this->name);
+  FOR_EACH_MANUAL_COUNTER(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
 #define PRINT_COUNTER(name, nargs, ressize) entries.Add(&this->Runtime_##name);
   FOR_EACH_INTRINSIC(PRINT_COUNTER)
 #undef PRINT_COUNTER
@@ -304,36 +312,38 @@
   BUILTIN_LIST_C(PRINT_COUNTER)
 #undef PRINT_COUNTER
 
-  entries.Add(&this->ExternalCallback);
-  entries.Add(&this->GC);
-  entries.Add(&this->UnexpectedStubMiss);
+#define PRINT_COUNTER(name) entries.Add(&this->API_##name);
+  FOR_EACH_API_COUNTER(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
+#define PRINT_COUNTER(name) entries.Add(&this->Handler_##name);
+  FOR_EACH_HANDLER_COUNTER(PRINT_COUNTER)
+#undef PRINT_COUNTER
 
   entries.Print(os);
 }
 
 void RuntimeCallStats::Reset() {
   if (!FLAG_runtime_call_stats) return;
-#define RESET_COUNTER(name, nargs, ressize) this->Runtime_##name.Reset();
+#define RESET_COUNTER(name) this->name.Reset();
+  FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
+
+#define RESET_COUNTER(name, nargs, result_size) this->Runtime_##name.Reset();
   FOR_EACH_INTRINSIC(RESET_COUNTER)
 #undef RESET_COUNTER
+
 #define RESET_COUNTER(name, type) this->Builtin_##name.Reset();
   BUILTIN_LIST_C(RESET_COUNTER)
 #undef RESET_COUNTER
-  this->ExternalCallback.Reset();
-  this->GC.Reset();
-  this->UnexpectedStubMiss.Reset();
-}
 
-void RuntimeCallTimerScope::Enter(Isolate* isolate,
-                                  RuntimeCallCounter* counter) {
-  isolate_ = isolate;
-  RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
-  timer_.Initialize(counter, stats->current_timer());
-  stats->Enter(&timer_);
-}
+#define RESET_COUNTER(name) this->API_##name.Reset();
+  FOR_EACH_API_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
 
-void RuntimeCallTimerScope::Leave() {
-  isolate_->counters()->runtime_call_stats()->Leave(&timer_);
+#define RESET_COUNTER(name) this->Handler_##name.Reset();
+  FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
 }
 
 }  // namespace internal
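
The CounterId type used above (and defined in counters.h below) is a C++ pointer-to-data-member: it lets Enter/Leave/CorrectCurrentCounterId name a counter without binding to any particular RuntimeCallStats instance, and `&(stats->*counter_id)` resolves it against the isolate's stats object at the last moment. A minimal standalone sketch of the technique, with toy Stats/Counter types:

#include <iostream>

struct Counter {
  const char* name;
  int count = 0;
};

struct Stats {
  Counter parse{"Parse"};
  Counter compile{"Compile"};
  typedef Counter Stats::*CounterId;  // pointer to a Counter data member
};

void Bump(Stats* stats, Stats::CounterId id) {
  Counter* counter = &(stats->*id);  // resolve the member against `stats`
  counter->count++;
}

int main() {
  Stats stats;
  Bump(&stats, &Stats::parse);    // callers name the member, not the object
  Bump(&stats, &Stats::compile);
  std::cout << stats.parse.count << " " << stats.compile.count << "\n";  // 1 1
  return 0;
}
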
diff --git a/src/counters.h b/src/counters.h
index 7183d0e..a61cacf 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -492,43 +492,255 @@
 // timers used for properly measuring the own time of a RuntimeCallCounter.
 class RuntimeCallTimer {
  public:
-  inline void Initialize(RuntimeCallCounter* counter,
-                         RuntimeCallTimer* parent) {
+  RuntimeCallTimer() {}
+
+ private:
+  friend class RuntimeCallStats;
+
+  inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent) {
     counter_ = counter;
     parent_ = parent;
-  }
-
-  inline void Start() {
     timer_.Start();
-    counter_->count++;
   }
 
   inline RuntimeCallTimer* Stop() {
     base::TimeDelta delta = timer_.Elapsed();
+    timer_.Stop();
+    counter_->count++;
     counter_->time += delta;
     if (parent_ != NULL) {
-      parent_->AdjustForSubTimer(delta);
+      // Adjust the parent timer so that it does not include the sub
+      // timer's time.
+      parent_->counter_->time -= delta;
     }
     return parent_;
   }
 
-  inline void AdjustForSubTimer(base::TimeDelta delta) {
-    counter_->time -= delta;
-  }
-
- private:
-  RuntimeCallCounter* counter_;
-  RuntimeCallTimer* parent_;
+  RuntimeCallCounter* counter_ = nullptr;
+  RuntimeCallTimer* parent_ = nullptr;
   base::ElapsedTimer timer_;
 };
 
-struct RuntimeCallStats {
-  // Dummy counter for the unexpected stub miss.
-  RuntimeCallCounter UnexpectedStubMiss =
-      RuntimeCallCounter("UnexpectedStubMiss");
-  // Counter for runtime callbacks into JavaScript.
-  RuntimeCallCounter ExternalCallback = RuntimeCallCounter("ExternalCallback");
-  RuntimeCallCounter GC = RuntimeCallCounter("GC");
+#define FOR_EACH_API_COUNTER(V)                            \
+  V(ArrayBuffer_Cast)                                      \
+  V(ArrayBuffer_Neuter)                                    \
+  V(ArrayBuffer_New)                                       \
+  V(Array_CloneElementAt)                                  \
+  V(Array_New)                                             \
+  V(BooleanObject_BooleanValue)                            \
+  V(BooleanObject_New)                                     \
+  V(Context_New)                                           \
+  V(DataView_New)                                          \
+  V(Date_DateTimeConfigurationChangeNotification)          \
+  V(Date_New)                                              \
+  V(Date_NumberValue)                                      \
+  V(Debug_Call)                                            \
+  V(Debug_GetMirror)                                       \
+  V(Error_New)                                             \
+  V(External_New)                                          \
+  V(Float32Array_New)                                      \
+  V(Float64Array_New)                                      \
+  V(Function_Call)                                         \
+  V(Function_New)                                          \
+  V(Function_NewInstance)                                  \
+  V(FunctionTemplate_GetFunction)                          \
+  V(FunctionTemplate_New)                                  \
+  V(FunctionTemplate_NewWithFastHandler)                   \
+  V(Int16Array_New)                                        \
+  V(Int32Array_New)                                        \
+  V(Int8Array_New)                                         \
+  V(JSON_Parse)                                            \
+  V(JSON_Stringify)                                        \
+  V(Map_AsArray)                                           \
+  V(Map_Clear)                                             \
+  V(Map_Delete)                                            \
+  V(Map_Get)                                               \
+  V(Map_Has)                                               \
+  V(Map_New)                                               \
+  V(Map_Set)                                               \
+  V(Message_GetEndColumn)                                  \
+  V(Message_GetLineNumber)                                 \
+  V(Message_GetSourceLine)                                 \
+  V(Message_GetStartColumn)                                \
+  V(NumberObject_New)                                      \
+  V(NumberObject_NumberValue)                              \
+  V(Object_CallAsConstructor)                              \
+  V(Object_CallAsFunction)                                 \
+  V(Object_CreateDataProperty)                             \
+  V(Object_DefineOwnProperty)                              \
+  V(Object_Delete)                                         \
+  V(Object_DeleteProperty)                                 \
+  V(Object_ForceSet)                                       \
+  V(Object_Get)                                            \
+  V(Object_GetOwnPropertyDescriptor)                       \
+  V(Object_GetOwnPropertyNames)                            \
+  V(Object_GetPropertyAttributes)                          \
+  V(Object_GetPropertyNames)                               \
+  V(Object_GetRealNamedProperty)                           \
+  V(Object_GetRealNamedPropertyAttributes)                 \
+  V(Object_GetRealNamedPropertyAttributesInPrototypeChain) \
+  V(Object_GetRealNamedPropertyInPrototypeChain)           \
+  V(Object_HasOwnProperty)                                 \
+  V(Object_HasRealIndexedProperty)                         \
+  V(Object_HasRealNamedCallbackProperty)                   \
+  V(Object_HasRealNamedProperty)                           \
+  V(Object_Int32Value)                                     \
+  V(Object_IntegerValue)                                   \
+  V(Object_New)                                            \
+  V(Object_NumberValue)                                    \
+  V(Object_ObjectProtoToString)                            \
+  V(Object_Set)                                            \
+  V(Object_SetAccessor)                                    \
+  V(Object_SetIntegrityLevel)                              \
+  V(Object_SetPrivate)                                     \
+  V(Object_SetPrototype)                                   \
+  V(ObjectTemplate_New)                                    \
+  V(ObjectTemplate_NewInstance)                            \
+  V(Object_ToArrayIndex)                                   \
+  V(Object_ToDetailString)                                 \
+  V(Object_ToInt32)                                        \
+  V(Object_ToInteger)                                      \
+  V(Object_ToNumber)                                       \
+  V(Object_ToObject)                                       \
+  V(Object_ToString)                                       \
+  V(Object_ToUint32)                                       \
+  V(Object_Uint32Value)                                    \
+  V(Persistent_New)                                        \
+  V(Private_New)                                           \
+  V(Promise_Catch)                                         \
+  V(Promise_Chain)                                         \
+  V(Promise_HasRejectHandler)                              \
+  V(Promise_Resolver_New)                                  \
+  V(Promise_Resolver_Resolve)                              \
+  V(Promise_Then)                                          \
+  V(Proxy_New)                                             \
+  V(RangeError_New)                                        \
+  V(ReferenceError_New)                                    \
+  V(RegExp_New)                                            \
+  V(ScriptCompiler_Compile)                                \
+  V(ScriptCompiler_CompileFunctionInContext)               \
+  V(ScriptCompiler_CompileUnbound)                         \
+  V(Script_Run)                                            \
+  V(Set_Add)                                               \
+  V(Set_AsArray)                                           \
+  V(Set_Clear)                                             \
+  V(Set_Delete)                                            \
+  V(Set_Has)                                               \
+  V(Set_New)                                               \
+  V(SharedArrayBuffer_New)                                 \
+  V(String_Concat)                                         \
+  V(String_NewExternalOneByte)                             \
+  V(String_NewExternalTwoByte)                             \
+  V(String_NewFromOneByte)                                 \
+  V(String_NewFromTwoByte)                                 \
+  V(String_NewFromUtf8)                                    \
+  V(StringObject_New)                                      \
+  V(StringObject_StringValue)                              \
+  V(String_Write)                                          \
+  V(String_WriteUtf8)                                      \
+  V(Symbol_New)                                            \
+  V(SymbolObject_New)                                      \
+  V(SymbolObject_SymbolValue)                              \
+  V(SyntaxError_New)                                       \
+  V(TryCatch_StackTrace)                                   \
+  V(TypeError_New)                                         \
+  V(Uint16Array_New)                                       \
+  V(Uint32Array_New)                                       \
+  V(Uint8Array_New)                                        \
+  V(Uint8ClampedArray_New)                                 \
+  V(UnboundScript_GetId)                                   \
+  V(UnboundScript_GetLineNumber)                           \
+  V(UnboundScript_GetName)                                 \
+  V(UnboundScript_GetSourceMappingURL)                     \
+  V(UnboundScript_GetSourceURL)                            \
+  V(Value_TypeOf)
+
+#define FOR_EACH_MANUAL_COUNTER(V)                  \
+  V(AccessorGetterCallback)                         \
+  V(AccessorNameGetterCallback)                     \
+  V(AccessorNameSetterCallback)                     \
+  V(Compile)                                        \
+  V(CompileCode)                                    \
+  V(CompileDeserialize)                             \
+  V(CompileEval)                                    \
+  V(CompileFullCode)                                \
+  V(CompileIgnition)                                \
+  V(CompileSerialize)                               \
+  V(DeoptimizeCode)                                 \
+  V(FunctionCallback)                               \
+  V(GC)                                             \
+  V(GenericNamedPropertyDeleterCallback)            \
+  V(GenericNamedPropertyQueryCallback)              \
+  V(GenericNamedPropertySetterCallback)             \
+  V(IndexedPropertyDeleterCallback)                 \
+  V(IndexedPropertyGetterCallback)                  \
+  V(IndexedPropertyQueryCallback)                   \
+  V(IndexedPropertySetterCallback)                  \
+  V(InvokeFunctionCallback)                         \
+  V(JS_Execution)                                   \
+  V(Map_SetPrototype)                               \
+  V(Map_TransitionToAccessorProperty)               \
+  V(Map_TransitionToDataProperty)                   \
+  V(Object_DeleteProperty)                          \
+  V(OptimizeCode)                                   \
+  V(Parse)                                          \
+  V(ParseLazy)                                      \
+  V(PropertyCallback)                               \
+  V(PrototypeMap_TransitionToAccessorProperty)      \
+  V(PrototypeMap_TransitionToDataProperty)          \
+  V(PrototypeObject_DeleteProperty)                 \
+  V(RecompileConcurrent)                            \
+  V(RecompileSynchronous)                           \
+  /* Dummy counter for the unexpected stub miss. */ \
+  V(UnexpectedStubMiss)
+
+#define FOR_EACH_HANDLER_COUNTER(V)             \
+  V(IC_HandlerCacheHit)                         \
+  V(KeyedLoadIC_LoadIndexedStringStub)          \
+  V(KeyedLoadIC_LoadIndexedInterceptorStub)     \
+  V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub)   \
+  V(KeyedLoadIC_LoadFastElementStub)            \
+  V(KeyedLoadIC_LoadDictionaryElementStub)      \
+  V(KeyedLoadIC_PolymorphicElement)             \
+  V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
+  V(KeyedStoreIC_StoreFastElementStub)          \
+  V(KeyedStoreIC_StoreElementStub)              \
+  V(KeyedStoreIC_Polymorphic)                   \
+  V(LoadIC_FunctionPrototypeStub)               \
+  V(LoadIC_ArrayBufferViewLoadFieldStub)        \
+  V(LoadIC_LoadApiGetterStub)                   \
+  V(LoadIC_LoadCallback)                        \
+  V(LoadIC_LoadConstant)                        \
+  V(LoadIC_LoadConstantStub)                    \
+  V(LoadIC_LoadField)                           \
+  V(LoadIC_LoadFieldStub)                       \
+  V(LoadIC_LoadGlobal)                          \
+  V(LoadIC_LoadInterceptor)                     \
+  V(LoadIC_LoadNonexistent)                     \
+  V(LoadIC_LoadNormal)                          \
+  V(LoadIC_LoadScriptContextFieldStub)          \
+  V(LoadIC_LoadViaGetter)                       \
+  V(LoadIC_SlowStub)                            \
+  V(LoadIC_StringLengthStub)                    \
+  V(StoreIC_SlowStub)                           \
+  V(StoreIC_StoreCallback)                      \
+  V(StoreIC_StoreField)                         \
+  V(StoreIC_StoreFieldStub)                     \
+  V(StoreIC_StoreGlobal)                        \
+  V(StoreIC_StoreGlobalTransition)              \
+  V(StoreIC_StoreInterceptorStub)               \
+  V(StoreIC_StoreNormal)                        \
+  V(StoreIC_StoreScriptContextFieldStub)        \
+  V(StoreIC_StoreTransition)                    \
+  V(StoreIC_StoreViaSetter)
+
+class RuntimeCallStats {
+ public:
+  typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
+
+#define CALL_RUNTIME_COUNTER(name) \
+  RuntimeCallCounter name = RuntimeCallCounter(#name);
+  FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
 #define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
   RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
   FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
@@ -537,42 +749,71 @@
   RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
   BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
 #undef CALL_BUILTIN_COUNTER
-
-  // Counter to track recursive time events.
-  RuntimeCallTimer* current_timer_ = NULL;
+#define CALL_BUILTIN_COUNTER(name) \
+  RuntimeCallCounter API_##name = RuntimeCallCounter("API_" #name);
+  FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) \
+  RuntimeCallCounter Handler_##name = RuntimeCallCounter(#name);
+  FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
 
   // Start measuring the time for a function. This will establish the
   // connection to the parent counter for properly calculating own times.
-  void Enter(RuntimeCallCounter* counter);
-  void Enter(RuntimeCallTimer* timer);
+  static void Enter(Isolate* isolate, RuntimeCallTimer* timer,
+                    CounterId counter_id);
+
   // Leave a scope for a measured runtime function. This will properly add
   // the time delta to the current_counter and subtract the delta from its
   // parent.
-  void Leave();
-  void Leave(RuntimeCallTimer* timer);
+  static void Leave(Isolate* isolate, RuntimeCallTimer* timer);
 
-  RuntimeCallTimer* current_timer() { return current_timer_; }
+  // Sets the counter id for the innermost measurement. It can be used to
+  // refine the event kind when a runtime entry counter is too generic.
+  static void CorrectCurrentCounterId(Isolate* isolate, CounterId counter_id);
 
   void Reset();
   void Print(std::ostream& os);
 
   RuntimeCallStats() { Reset(); }
+
+ private:
+  // Counter to track recursive time events.
+  RuntimeCallTimer* current_timer_ = NULL;
 };
 
+#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name) \
+  do {                                                  \
+    if (FLAG_runtime_call_stats) {                      \
+      RuntimeCallStats::CorrectCurrentCounterId(        \
+          isolate, &RuntimeCallStats::counter_name);    \
+    }                                                   \
+  } while (false)
+
+#define TRACE_HANDLER_STATS(isolate, counter_name) \
+  TRACE_RUNTIME_CALL_STATS(isolate, Handler_##counter_name)
+
 // A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
 // time of a C++ scope.
 class RuntimeCallTimerScope {
  public:
-  inline explicit RuntimeCallTimerScope(Isolate* isolate,
-                                        RuntimeCallCounter* counter) {
-    if (FLAG_runtime_call_stats) Enter(isolate, counter);
+  inline RuntimeCallTimerScope(Isolate* isolate,
+                               RuntimeCallStats::CounterId counter_id) {
+    if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
+      isolate_ = isolate;
+      RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
+    }
   }
-  inline ~RuntimeCallTimerScope() {
-    if (FLAG_runtime_call_stats) Leave();
-  }
+  // This constructor is here just to avoid calling GetIsolate() when the
+  // stats are disabled and the isolate is not directly available.
+  inline RuntimeCallTimerScope(HeapObject* heap_object,
+                               RuntimeCallStats::CounterId counter_id);
 
-  void Enter(Isolate* isolate, RuntimeCallCounter* counter);
-  void Leave();
+  inline ~RuntimeCallTimerScope() {
+    if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
+      RuntimeCallStats::Leave(isolate_, &timer_);
+    }
+  }
 
  private:
   Isolate* isolate_;
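
TRACE_HANDLER_STATS above does not open a new timer scope; via CorrectCurrentCounterId it re-labels the innermost timer that is already running. A hypothetical call site in an IC handler compiler (CompileLoadFieldHandler is made up; the counter name comes from FOR_EACH_HANDLER_COUNTER):

void CompileLoadFieldHandler(Isolate* isolate) {
  // The caller already runs under a generic LoadIC timer; this refines the
  // attribution of the *current* measurement without nesting a new timer.
  TRACE_HANDLER_STATS(isolate, LoadIC_LoadFieldStub);
  // ... build the field-load handler code ...
}
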
@@ -588,39 +829,53 @@
      101)                                                                     \
   HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)             \
   HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20)        \
-  HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)
+  HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)                      \
+  /* Asm/Wasm. */                                                             \
+  HR(wasm_functions_per_module, V8.WasmFunctionsPerModule, 1, 10000, 51)
 
-#define HISTOGRAM_TIMER_LIST(HT)                                              \
-  /* Garbage collection timers. */                                            \
-  HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND)                        \
-  HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND)                        \
-  HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000,           \
-     MILLISECOND)                                                             \
-  HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND)                        \
-  HT(gc_context, V8.GCContext, 10000,                                         \
-     MILLISECOND) /* GC context cleanup time */                               \
-  HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND)         \
-  HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND)     \
-  HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000,       \
-     MILLISECOND)                                                             \
-  HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000, \
-     MILLISECOND)                                                             \
-  HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000,           \
-     MILLISECOND)                                                             \
-  /* Parsing timers. */                                                       \
-  HT(parse, V8.ParseMicroSeconds, 1000000, MICROSECOND)                       \
-  HT(parse_lazy, V8.ParseLazyMicroSeconds, 1000000, MICROSECOND)              \
-  HT(pre_parse, V8.PreParseMicroSeconds, 1000000, MICROSECOND)                \
-  /* Compilation times. */                                                    \
-  HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND)                   \
-  HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND)          \
-  /* Serialization as part of compilation (code caching) */                   \
-  HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND) \
-  HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000,         \
-     MICROSECOND)                                                             \
-  /* Total compilation time incl. caching/parsing */                          \
-  HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND)
-
+#define HISTOGRAM_TIMER_LIST(HT)                                               \
+  /* Garbage collection timers. */                                             \
+  HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND)                         \
+  HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND)                         \
+  HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000,            \
+     MILLISECOND)                                                              \
+  HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND)                         \
+  HT(gc_context, V8.GCContext, 10000,                                          \
+     MILLISECOND) /* GC context cleanup time */                                \
+  HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND)          \
+  HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND)      \
+  HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000,        \
+     MILLISECOND)                                                              \
+  HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000,  \
+     MILLISECOND)                                                              \
+  HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000,            \
+     MILLISECOND)                                                              \
+  /* Parsing timers. */                                                        \
+  HT(parse, V8.ParseMicroSeconds, 1000000, MICROSECOND)                        \
+  HT(parse_lazy, V8.ParseLazyMicroSeconds, 1000000, MICROSECOND)               \
+  HT(pre_parse, V8.PreParseMicroSeconds, 1000000, MICROSECOND)                 \
+  /* Compilation times. */                                                     \
+  HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND)                    \
+  HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND)           \
+  /* Serialization as part of compilation (code caching) */                    \
+  HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND)  \
+  HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000,          \
+     MICROSECOND)                                                              \
+  /* Total compilation time incl. caching/parsing */                           \
+  HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND)       \
+  /* Total JavaScript execution time (including callbacks and runtime calls) */ \
+  HT(execute, V8.Execute, 1000000, MICROSECOND)                                \
+  /* Asm/Wasm */                                                               \
+  HT(wasm_instantiate_module_time, V8.WasmInstantiateModuleMicroSeconds,       \
+     1000000, MICROSECOND)                                                     \
+  HT(wasm_decode_module_time, V8.WasmDecodeModuleMicroSeconds, 1000000,        \
+     MICROSECOND)                                                              \
+  HT(wasm_decode_function_time, V8.WasmDecodeFunctionMicroSeconds, 1000000,    \
+     MICROSECOND)                                                              \
+  HT(wasm_compile_module_time, V8.WasmCompileModuleMicroSeconds, 1000000,      \
+     MICROSECOND)                                                              \
+  HT(wasm_compile_function_time, V8.WasmCompileFunctionMicroSeconds, 1000000,  \
+     MICROSECOND)
 
 #define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
   AHT(compile_lazy, V8.CompileLazyMicroSeconds)
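
These HR/HT/HM/AHT lists are X-macros: each consumer defines the entry macro,
instantiates the list, and undefines it again. An illustrative consumer only;
the real expansion inside the Counters class differs in detail:

    #define HT(name, caption, max, res) HistogramTimer name##_;
    class CountersSketch {       // illustrative class, not V8's Counters
      HISTOGRAM_TIMER_LIST(HT)   // one HistogramTimer field per list entry
    };
    #undef HT
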
@@ -651,10 +906,17 @@
   HM(heap_sample_code_space_committed, V8.MemoryHeapSampleCodeSpaceCommitted) \
   HM(heap_sample_maximum_committed, V8.MemoryHeapSampleMaximumCommitted)
 
-#define HISTOGRAM_MEMORY_LIST(HM)                   \
-  HM(memory_heap_committed, V8.MemoryHeapCommitted) \
-  HM(memory_heap_used, V8.MemoryHeapUsed)
-
+#define HISTOGRAM_MEMORY_LIST(HM)                                              \
+  HM(memory_heap_committed, V8.MemoryHeapCommitted)                            \
+  HM(memory_heap_used, V8.MemoryHeapUsed)                                      \
+  /* Asm/Wasm */                                                               \
+  HM(wasm_decode_module_peak_memory_bytes, V8.WasmDecodeModulePeakMemoryBytes) \
+  HM(wasm_compile_function_peak_memory_bytes,                                  \
+     V8.WasmCompileFunctionPeakMemoryBytes)                                    \
+  HM(wasm_min_mem_pages_count, V8.WasmMinMemPagesCount)                        \
+  HM(wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount)                        \
+  HM(wasm_function_size_bytes, V8.WasmFunctionSizeBytes)                       \
+  HM(wasm_module_size_bytes, V8.WasmModuleSizeBytes)
 
 // WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
 // Intellisense to crash.  It was broken into two macros (each of length 40
@@ -703,7 +965,6 @@
   /* The store-buffer implementation of the write barrier. */         \
   SC(store_buffer_overflows, V8.StoreBufferOverflows)
 
-
 #define STATS_COUNTER_LIST_2(SC)                                               \
   /* Number of code stubs. */                                                  \
   SC(code_stubs, V8.CodeStubs)                                                 \
@@ -743,8 +1004,6 @@
   SC(enum_cache_hits, V8.EnumCacheHits)                                        \
   SC(enum_cache_misses, V8.EnumCacheMisses)                                    \
   SC(fast_new_closure_total, V8.FastNewClosureTotal)                           \
-  SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized)            \
-  SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized)    \
   SC(string_add_runtime, V8.StringAddRuntime)                                  \
   SC(string_add_native, V8.StringAddNative)                                    \
   SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte)      \
@@ -757,13 +1016,12 @@
   SC(number_to_string_native, V8.NumberToStringNative)                         \
   SC(number_to_string_runtime, V8.NumberToStringRuntime)                       \
   SC(math_atan2_runtime, V8.MathAtan2Runtime)                                  \
-  SC(math_clz32_runtime, V8.MathClz32Runtime)                                  \
   SC(math_exp_runtime, V8.MathExpRuntime)                                      \
   SC(math_log_runtime, V8.MathLogRuntime)                                      \
   SC(math_pow_runtime, V8.MathPowRuntime)                                      \
   SC(stack_interrupts, V8.StackInterrupts)                                     \
   SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                          \
-  SC(runtime_calls, V8.RuntimeCalls)                          \
+  SC(runtime_calls, V8.RuntimeCalls)                                           \
   SC(bounds_checks_eliminated, V8.BoundsChecksEliminated)                      \
   SC(bounds_checks_hoisted, V8.BoundsChecksHoisted)                            \
   SC(soft_deopts_requested, V8.SoftDeoptsRequested)                            \
diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc
index 4072982..d8ee9cd 100644
--- a/src/crankshaft/arm/lithium-arm.cc
+++ b/src/crankshaft/arm/lithium-arm.cc
@@ -877,7 +877,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -934,17 +934,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), cp);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -2351,13 +2340,18 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
   LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
-  return AssignPointerMap(DefineAsRegister(result));
+  if (instr->IsAllocationFolded()) {
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
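
The branch above is the heart of allocation folding at the Lithium level: a
folded HAllocate can never trigger a GC (its memory was already reserved by
its dominator), so it needs neither a context, a pointer map, nor the
deferred-call marking; only the dominator keeps the slow path. A toy model of
the split, with none of these names taken from V8:

    #include <cstddef>
    struct LinearBuffer { char* top; char* limit; };

    // Dominator: one limit check for the combined size; may fall back to a
    // runtime call (elided here) and therefore needs a safepoint.
    char* DominatorAllocate(LinearBuffer& b, size_t combined_size) {
      if (b.top + combined_size > b.limit) return nullptr;  // slow path
      return b.top;  // reservation only; objects are carved out below
    }

    // Folded allocation: bump-pointer within the reservation, cannot fail.
    char* FoldedFastAllocate(LinearBuffer& b, size_t size) {
      char* result = b.top;
      b.top += size;
      return result;
    }
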
 
 
diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h
index 60fe79d..2ec992f 100644
--- a/src/crankshaft/arm/lithium-arm.h
+++ b/src/crankshaft/arm/lithium-arm.h
@@ -67,6 +67,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -78,7 +79,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -151,7 +151,6 @@
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
   Opcode opcode() const final { return LInstruction::k##type; } \
   void CompileToNative(LCodeGen* generator) final;              \
@@ -1135,22 +1134,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -2391,6 +2374,21 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
 
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index c64aac3..340642a 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -329,8 +329,6 @@
       } else {
         __ bl(&call_deopt_entry);
       }
-      info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                   table_entry->deopt_info.inlining_id);
       masm()->CheckConstPool(false, false);
     }
 
@@ -823,7 +821,7 @@
     __ stop("trap_on_deopt", condition);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
@@ -832,7 +830,6 @@
       !info()->saves_caller_doubles()) {
     DeoptComment(deopt_info);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
@@ -2498,16 +2495,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(r0));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -4031,11 +4018,6 @@
 
   if (instr->NeedsCanonicalization()) {
     // Force a canonical NaN.
-    if (masm()->emit_debug_code()) {
-      __ vmrs(ip);
-      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
-      __ Assert(ne, kDefaultNaNModeNotSet);
-    }
     __ VFPCanonicalizeNaN(double_scratch, value);
     __ vstr(double_scratch, scratch, 0);
   } else {
@@ -4193,7 +4175,15 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
+      LConstantOperand* constant_key = LConstantOperand::cast(key);
+      int32_t int_key = ToInteger32(constant_key);
+      if (Smi::IsValid(int_key)) {
+        __ mov(r3, Operand(Smi::FromInt(int_key)));
+      } else {
+        // We should never get here at runtime because there is a smi check on
+        // the key before this point.
+        __ stop("expected smi");
+      }
     } else {
       __ Move(r3, ToRegister(key));
       __ SmiTag(r3);
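
Background for the Smi::IsValid guard above: on 32-bit targets a smi is the
integer shifted left by one tag bit, so only 31-bit payloads fit and an
out-of-range constant key must take the stop path. Illustrative definitions,
not V8's:

    #include <cstdint>
    bool SmiIsValid32BitTarget(int32_t value) {
      return value >= -(1 << 30) && value <= (1 << 30) - 1;  // 31-bit payload
    }
    int32_t SmiFromInt32BitTarget(int32_t value) {
      return value * 2;  // shift in the tag bit; 0 means "small integer"
    }
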
@@ -4494,7 +4484,7 @@
 
   if (FLAG_inline_new) {
     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
     __ b(&done);
   }
 
@@ -4518,15 +4508,13 @@
     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-    __ sub(r0, r0, Operand(kHeapObjectTag));
     __ StoreToSafepointRegisterSlot(r0, dst);
   }
 
   // Done. Store the value from dbl_scratch into the value field of the
   // allocated heap number.
   __ bind(&done);
-  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
-  __ add(dst, dst, Operand(kHeapObjectTag));
+  __ vstr(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
 }
 
 
@@ -4551,16 +4539,12 @@
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
-    // We want the untagged address first for performance
-    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
-                          DONT_TAG_RESULT);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
   } else {
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
-  // Now that we have finished with the object's real address tag it
-  __ add(reg, reg, Operand(kHeapObjectTag));
+  __ vstr(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
 }
 
 
@@ -4581,7 +4565,6 @@
   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-  __ sub(r0, r0, Operand(kHeapObjectTag));
   __ StoreToSafepointRegisterSlot(r0, reg);
 }
 
@@ -5105,7 +5088,7 @@
   Register scratch2 = ToRegister(instr->temp2());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -5114,6 +5097,11 @@
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
 
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5181,6 +5169,49 @@
   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(r0, result);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags =
+          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+    }
+    // If the allocation folding dominator's allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    Register top_address = scratch0();
+    __ sub(r0, r0, Operand(kHeapObjectTag));
+    __ mov(top_address, Operand(allocation_top));
+    __ str(r0, MemOperand(top_address));
+    __ add(r0, r0, Operand(kHeapObjectTag));
+  }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  }
 }
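
The top-pointer reset above preserves the folding invariant: the dominator
only reserves space, and every folded object (including the dominator's own,
via the inserted first_alloc) is carved out by FastAllocate bumping top. When
the slow path allocated in the runtime, top already points past the
reservation, so it is wound back to the object start. A hedged sketch of that
repair in plain C++, not V8 API:

    // After a runtime allocation of the combined reservation, make it look
    // as if only a reservation happened: point top back at the start so the
    // subsequent FastAllocate calls re-carve the folded objects.
    void UndoRuntimeBump(char** allocation_top, char* reservation_start) {
      *allocation_top = reservation_start;
    }
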
 
 
diff --git a/src/crankshaft/arm/lithium-codegen-arm.h b/src/crankshaft/arm/lithium-codegen-arm.h
index 8bbacc3..30a26f2 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/src/crankshaft/arm/lithium-codegen-arm.h
@@ -135,8 +135,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   Scope* scope() const { return scope_; }
 
   Register scratch0() { return r9; }
diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc
index 6cfc846..2154398 100644
--- a/src/crankshaft/arm64/lithium-arm64.cc
+++ b/src/crankshaft/arm64/lithium-arm64.cc
@@ -726,7 +726,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -841,14 +841,20 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
   LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
-  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2, temp3);
-  return AssignPointerMap(DefineAsRegister(result));
+  if (instr->IsAllocationFolded()) {
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
+    LAllocate* result =
+        new (zone()) LAllocate(context, size, temp1, temp2, temp3);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
 
 
@@ -1461,17 +1467,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), cp);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h
index 237487f..383e5c3 100644
--- a/src/crankshaft/arm64/lithium-arm64.h
+++ b/src/crankshaft/arm64/lithium-arm64.h
@@ -70,6 +70,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -81,7 +82,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -163,7 +163,6 @@
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
   Opcode opcode() const final { return LInstruction::k##type; } \
   void CompileToNative(LCodeGen* generator) final;              \
@@ -626,6 +625,21 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
 
 class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
  public:
@@ -1363,22 +1377,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 2> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 9bbc8b8..ebc5277 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -599,7 +599,7 @@
   Comment(";;; Prologue begin");
 
   // Allocate a local context if needed.
-  if (info()->num_heap_slots() > 0) {
+  if (info()->scope()->num_heap_slots() > 0) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is in x1.
@@ -775,8 +775,6 @@
         // table.
         __ Bl(&call_deopt_entry);
       }
-      info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                   table_entry->deopt_info.inlining_id);
 
       masm()->CheckConstPool(false, false);
     }
@@ -892,7 +890,7 @@
     __ Bind(&dont_trap);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to build frame, or restore caller doubles.
@@ -900,7 +898,6 @@
       frame_is_built_ && !info()->saves_caller_doubles()) {
     DeoptComment(deopt_info);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry* table_entry =
         new (zone()) Deoptimizer::JumpTableEntry(
@@ -1416,7 +1413,7 @@
   Register temp2 = ToRegister(instr->temp2());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -1426,6 +1423,11 @@
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
 
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -1487,6 +1489,49 @@
   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags =
+          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+    }
+    // If the allocation folding dominator's allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    Register top_address = x10;
+    __ Sub(x0, x0, Operand(kHeapObjectTag));
+    __ Mov(top_address, Operand(allocation_top));
+    __ Str(x0, MemOperand(top_address));
+    __ Add(x0, x0, Operand(kHeapObjectTag));
+  }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  }
 }
 
 
@@ -2758,16 +2803,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(x0));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
diff --git a/src/crankshaft/hydrogen-gvn.cc b/src/crankshaft/hydrogen-gvn.cc
index 07bfabc..e6ddd75 100644
--- a/src/crankshaft/hydrogen-gvn.cc
+++ b/src/crankshaft/hydrogen-gvn.cc
@@ -637,17 +637,12 @@
 }
 
 
-bool HGlobalValueNumberingPhase::AllowCodeMotion() {
-  return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
-}
-
-
 bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
                                             HBasicBlock* loop_header) {
   // If we've disabled code motion or we're in a block that unconditionally
   // deoptimizes, don't move any instructions.
-  return AllowCodeMotion() && !instr->block()->IsDeoptimizing() &&
-      instr->block()->IsReachable();
+  return graph()->allow_code_motion() && !instr->block()->IsDeoptimizing() &&
+         instr->block()->IsReachable();
 }
 
 
diff --git a/src/crankshaft/hydrogen-gvn.h b/src/crankshaft/hydrogen-gvn.h
index a5e2168..9a8d407 100644
--- a/src/crankshaft/hydrogen-gvn.h
+++ b/src/crankshaft/hydrogen-gvn.h
@@ -126,7 +126,6 @@
   void ProcessLoopBlock(HBasicBlock* block,
                         HBasicBlock* before_loop,
                         SideEffects loop_kills);
-  bool AllowCodeMotion();
   bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
   TrackedEffects Print(SideEffects side_effects) {
     return TrackedEffects(&side_effects_tracker_, side_effects);
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
index 7de8b5d..5e4ad37 100644
--- a/src/crankshaft/hydrogen-instructions.cc
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -798,7 +798,6 @@
     case HValue::kHasCachedArrayIndexAndBranch:
     case HValue::kHasInstanceTypeAndBranch:
     case HValue::kInnerAllocatedObject:
-    case HValue::kInstanceOf:
     case HValue::kIsSmiAndBranch:
     case HValue::kIsStringAndBranch:
     case HValue::kIsUndetectableAndBranch:
@@ -1203,7 +1202,6 @@
 String* TypeOfString(HConstant* constant, Isolate* isolate) {
   Heap* heap = isolate->heap();
   if (constant->HasNumberValue()) return heap->number_string();
-  if (constant->IsUndetectable()) return heap->undefined_string();
   if (constant->HasStringValue()) return heap->string_string();
   switch (constant->GetInstanceType()) {
     case ODDBALL_TYPE: {
@@ -1232,6 +1230,7 @@
       return nullptr;
     }
     default:
+      if (constant->IsUndetectable()) return heap->undefined_string();
       if (constant->IsCallable()) return heap->function_string();
       return heap->object_string();
   }
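
Moving the IsUndetectable() test into the default case changes the precedence:
a constant with a known number or string value now reports its value type
first, and undetectability only decides the result for the remaining objects
(e.g. document.all, which is undetectable, still yields "undefined"). A
condensed model of the new ordering, with the oddball and SIMD cases elided
and all names illustrative:

    const char* TypeOfModel(bool has_number, bool has_string,
                            bool undetectable, bool callable) {
      if (has_number) return "number";
      if (has_string) return "string";       // now checked before undetectable
      if (undetectable) return "undefined";  // only reached for objects
      if (callable) return "function";
      return "object";
    }
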
@@ -1652,12 +1651,6 @@
 }
 
 
-std::ostream& HInstanceOf::PrintDataTo(std::ostream& os) const {  // NOLINT
-  return os << NameOf(left()) << " " << NameOf(right()) << " "
-            << NameOf(context());
-}
-
-
 Range* HValue::InferRange(Zone* zone) {
   Range* result;
   if (representation().IsSmi() || type().IsSmi()) {
@@ -3126,6 +3119,7 @@
 bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
                                           HValue* dominator) {
   DCHECK(side_effect == kNewSpacePromotion);
+  DCHECK(!IsAllocationFolded());
   Zone* zone = block()->zone();
   Isolate* isolate = block()->isolate();
   if (!FLAG_use_allocation_folding) return false;
@@ -3153,7 +3147,8 @@
   HValue* current_size = size();
 
   // TODO(hpayer): Add support for non-constant allocation in dominator.
-  if (!dominator_size->IsInteger32Constant()) {
+  if (!current_size->IsInteger32Constant() ||
+      !dominator_size->IsInteger32Constant()) {
     if (FLAG_trace_allocation_folding) {
       PrintF("#%d (%s) cannot fold into #%d (%s), "
              "dynamic allocation size in dominator\n",
@@ -3171,32 +3166,6 @@
     return false;
   }
 
-  if (!has_size_upper_bound()) {
-    if (FLAG_trace_allocation_folding) {
-      PrintF("#%d (%s) cannot fold into #%d (%s), "
-             "can't estimate total allocation size\n",
-          id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
-    }
-    return false;
-  }
-
-  if (!current_size->IsInteger32Constant()) {
-    // If it's not constant then it is a size_in_bytes calculation graph
-    // like this: (const_header_size + const_element_size * size).
-    DCHECK(current_size->IsInstruction());
-
-    HInstruction* current_instr = HInstruction::cast(current_size);
-    if (!current_instr->Dominates(dominator_allocate)) {
-      if (FLAG_trace_allocation_folding) {
-        PrintF("#%d (%s) cannot fold into #%d (%s), dynamic size "
-               "value does not dominate target allocation\n",
-            id(), Mnemonic(), dominator_allocate->id(),
-            dominator_allocate->Mnemonic());
-      }
-      return false;
-    }
-  }
-
   DCHECK(
       (IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) ||
       (IsOldSpaceAllocation() && dominator_allocate->IsOldSpaceAllocation()));
@@ -3213,7 +3182,7 @@
     }
   }
 
-  int32_t current_size_max_value = size_upper_bound()->GetInteger32Constant();
+  int32_t current_size_max_value = size()->GetInteger32Constant();
   int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
 
   // Since we clear the first word after folded memory, we cannot use the
@@ -3227,27 +3196,9 @@
     return false;
   }
 
-  HInstruction* new_dominator_size_value;
-
-  if (current_size->IsInteger32Constant()) {
-    new_dominator_size_value = HConstant::CreateAndInsertBefore(
-        isolate, zone, context(), new_dominator_size, Representation::None(),
-        dominator_allocate);
-  } else {
-    HValue* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
-        isolate, zone, context(), dominator_size_constant,
-        Representation::Integer32(), dominator_allocate);
-
-    // Add old and new size together and insert.
-    current_size->ChangeRepresentation(Representation::Integer32());
-
-    new_dominator_size_value = HAdd::New(
-        isolate, zone, context(), new_dominator_size_constant, current_size);
-    new_dominator_size_value->ClearFlag(HValue::kCanOverflow);
-    new_dominator_size_value->ChangeRepresentation(Representation::Integer32());
-
-    new_dominator_size_value->InsertBefore(dominator_allocate);
-  }
+  HInstruction* new_dominator_size_value = HConstant::CreateAndInsertBefore(
+      isolate, zone, context(), new_dominator_size, Representation::None(),
+      dominator_allocate);
 
   dominator_allocate->UpdateSize(new_dominator_size_value);
 
@@ -3257,103 +3208,45 @@
     }
   }
 
-  bool keep_heap_iterable = FLAG_log_gc || FLAG_heap_stats;
-#ifdef VERIFY_HEAP
-  keep_heap_iterable = keep_heap_iterable || FLAG_verify_heap;
-#endif
-
-  if (keep_heap_iterable) {
-    dominator_allocate->MakePrefillWithFiller();
-  } else {
-    // TODO(hpayer): This is a short-term hack to make allocation mementos
-    // work again in new space.
-    dominator_allocate->ClearNextMapWord(original_object_size);
+  if (IsAllocationFoldingDominator()) {
+    DeleteAndReplaceWith(dominator_allocate);
+    if (FLAG_trace_allocation_folding) {
+      PrintF(
+          "#%d (%s) folded dominator into #%d (%s), new dominator size: %d\n",
+          id(), Mnemonic(), dominator_allocate->id(),
+          dominator_allocate->Mnemonic(), new_dominator_size);
+    }
+    return true;
   }
 
-  dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord());
+  if (!dominator_allocate->IsAllocationFoldingDominator()) {
+    HAllocate* first_alloc =
+        HAllocate::New(isolate, zone, dominator_allocate->context(),
+                       dominator_size, dominator_allocate->type(),
+                       IsNewSpaceAllocation() ? NOT_TENURED : TENURED,
+                       JS_OBJECT_TYPE, block()->graph()->GetConstant0());
+    first_alloc->InsertAfter(dominator_allocate);
+    dominator_allocate->ReplaceAllUsesWith(first_alloc);
+    dominator_allocate->MakeAllocationFoldingDominator();
+    first_alloc->MakeFoldedAllocation(dominator_allocate);
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) inserted for dominator #%d (%s)\n", first_alloc->id(),
+             first_alloc->Mnemonic(), dominator_allocate->id(),
+             dominator_allocate->Mnemonic());
+    }
+  }
 
-  // After that replace the dominated allocate instruction.
-  HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
-      isolate, zone, context(), dominator_size_constant, Representation::None(),
-      this);
+  MakeFoldedAllocation(dominator_allocate);
 
-  HInstruction* dominated_allocate_instr = HInnerAllocatedObject::New(
-      isolate, zone, context(), dominator_allocate, inner_offset, type());
-  dominated_allocate_instr->InsertBefore(this);
-  DeleteAndReplaceWith(dominated_allocate_instr);
   if (FLAG_trace_allocation_folding) {
-    PrintF("#%d (%s) folded into #%d (%s)\n",
-        id(), Mnemonic(), dominator_allocate->id(),
-        dominator_allocate->Mnemonic());
+    PrintF("#%d (%s) folded into #%d (%s), new dominator size: %d\n", id(),
+           Mnemonic(), dominator_allocate->id(), dominator_allocate->Mnemonic(),
+           new_dominator_size);
   }
   return true;
 }
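
In outline, the rewritten HandleSideEffectDominator does three things: grow
the dominator's reservation by the (now necessarily constant) folded size,
lazily split off a first_alloc that takes over the dominator's original object
the first time anything folds into it, and mark the current allocation as
folded. A simplified model under those assumptions (constant sizes, a single
space; none of the names are V8's):

    struct AllocModel {
      int size;
      bool folding_dominator = false;
    };

    // Returns false when folding must bail out, mirroring the size check
    // above (one pointer-sized filler word is kept in reserve).
    bool TryFold(AllocModel& current, AllocModel& dominator,
                 int max_regular_object_size) {
      int combined = dominator.size + current.size;
      if (combined > max_regular_object_size - static_cast<int>(sizeof(void*)))
        return false;
      dominator.size = combined;           // dominator now reserves both
      dominator.folding_dominator = true;  // first fold promotes it
      return true;
    }
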
 
 
-void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
-  DCHECK(filler_free_space_size_ != NULL);
-  Zone* zone = block()->zone();
-  // We must explicitly force Smi representation here because on x64 we
-  // would otherwise automatically choose int32, but the actual store
-  // requires a Smi-tagged value.
-  HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
-      block()->isolate(), zone, context(),
-      filler_free_space_size_->value()->GetInteger32Constant() +
-          free_space_size,
-      Representation::Smi(), filler_free_space_size_);
-  filler_free_space_size_->UpdateValue(new_free_space_size);
-}
-
-
-void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
-  DCHECK(filler_free_space_size_ == NULL);
-  Isolate* isolate = block()->isolate();
-  Zone* zone = block()->zone();
-  HInstruction* free_space_instr =
-      HInnerAllocatedObject::New(isolate, zone, context(), dominating_allocate_,
-                                 dominating_allocate_->size(), type());
-  free_space_instr->InsertBefore(this);
-  HConstant* filler_map = HConstant::CreateAndInsertAfter(
-      zone, Unique<Map>::CreateImmovable(isolate->factory()->free_space_map()),
-      true, free_space_instr);
-  HInstruction* store_map =
-      HStoreNamedField::New(isolate, zone, context(), free_space_instr,
-                            HObjectAccess::ForMap(), filler_map);
-  store_map->SetFlag(HValue::kHasNoObservableSideEffects);
-  store_map->InsertAfter(filler_map);
-
-  // We must explicitly force Smi representation here because on x64 we
-  // would otherwise automatically choose int32, but the actual store
-  // requires a Smi-tagged value.
-  HConstant* filler_size =
-      HConstant::CreateAndInsertAfter(isolate, zone, context(), free_space_size,
-                                      Representation::Smi(), store_map);
-  // Must force Smi representation for x64 (see comment above).
-  HObjectAccess access = HObjectAccess::ForMapAndOffset(
-      isolate->factory()->free_space_map(), FreeSpace::kSizeOffset,
-      Representation::Smi());
-  HStoreNamedField* store_size = HStoreNamedField::New(
-      isolate, zone, context(), free_space_instr, access, filler_size);
-  store_size->SetFlag(HValue::kHasNoObservableSideEffects);
-  store_size->InsertAfter(filler_size);
-  filler_free_space_size_ = store_size;
-}
-
-
-void HAllocate::ClearNextMapWord(int offset) {
-  if (MustClearNextMapWord()) {
-    Zone* zone = block()->zone();
-    HObjectAccess access =
-        HObjectAccess::ForObservableJSObjectOffset(offset);
-    HStoreNamedField* clear_next_map =
-        HStoreNamedField::New(block()->isolate(), zone, context(), this, access,
-                              block()->graph()->GetConstant0());
-    clear_next_map->ClearAllSideEffects();
-    clear_next_map->InsertAfter(this);
-  }
-}
-
-
 std::ostream& HAllocate::PrintDataTo(std::ostream& os) const {  // NOLINT
   os << NameOf(size()) << " (";
   if (IsNewSpaceAllocation()) os << "N";
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
index 196a14f..fdb1fd6 100644
--- a/src/crankshaft/hydrogen-instructions.h
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -95,7 +95,6 @@
   V(HasCachedArrayIndexAndBranch)             \
   V(HasInstanceTypeAndBranch)                 \
   V(InnerAllocatedObject)                     \
-  V(InstanceOf)                               \
   V(InvokeFunction)                           \
   V(HasInPrototypeChainAndBranch)             \
   V(IsStringAndBranch)                        \
@@ -2498,7 +2497,7 @@
   // Indicates if we support a double (and int32) output for Math.floor and
   // Math.round.
   bool SupportsFlexibleFloorAndRound() const {
-#if V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
     return true;
 #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
     return CpuFeatures::IsSupported(SSE4_1);
@@ -4271,27 +4270,6 @@
 };
 
 
-class HInstanceOf final : public HBinaryOperation {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
-
- private:
-  HInstanceOf(HValue* context, HValue* left, HValue* right)
-      : HBinaryOperation(context, left, right, HType::Boolean()) {
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
-};
-
-
 class HHasInPrototypeChainAndBranch final
     : public HTemplateControlInstruction<2, 2> {
  public:
@@ -4939,8 +4917,7 @@
   FeedbackVectorSlot slot_;
 };
 
-
-class HAllocate final : public HTemplateInstruction<2> {
+class HAllocate final : public HTemplateInstruction<3> {
  public:
   static bool CompatibleInstanceTypes(InstanceType type1,
                                       InstanceType type2) {
@@ -4951,9 +4928,10 @@
   static HAllocate* New(
       Isolate* isolate, Zone* zone, HValue* context, HValue* size, HType type,
       PretenureFlag pretenure_flag, InstanceType instance_type,
+      HValue* dominator,
       Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null()) {
-    return new(zone) HAllocate(context, size, type, pretenure_flag,
-        instance_type, allocation_site);
+    return new (zone) HAllocate(context, size, type, pretenure_flag,
+                                instance_type, dominator, allocation_site);
   }
 
   // Maximum instance size for which allocations will be inlined.
@@ -4961,13 +4939,7 @@
 
   HValue* context() const { return OperandAt(0); }
   HValue* size() const { return OperandAt(1); }
-
-  bool has_size_upper_bound() { return size_upper_bound_ != NULL; }
-  HConstant* size_upper_bound() { return size_upper_bound_; }
-  void set_size_upper_bound(HConstant* value) {
-    DCHECK(size_upper_bound_ == NULL);
-    size_upper_bound_ = value;
-  }
+  HValue* allocation_folding_dominator() const { return OperandAt(2); }
 
   Representation RequiredInputRepresentation(int index) override {
     if (index == 0) {
@@ -5005,14 +4977,28 @@
     flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
   }
 
-  bool MustClearNextMapWord() const {
-    return (flags_ & CLEAR_NEXT_MAP_WORD) != 0;
-  }
-
   void MakeDoubleAligned() {
     flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
   }
 
+  void MakeAllocationFoldingDominator() {
+    flags_ =
+        static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDING_DOMINATOR);
+  }
+
+  bool IsAllocationFoldingDominator() {
+    return (flags_ & ALLOCATION_FOLDING_DOMINATOR) != 0;
+  }
+
+  void MakeFoldedAllocation(HAllocate* dominator) {
+    flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDED);
+    ClearFlag(kTrackSideEffectDominators);
+    ClearChangesFlag(kNewSpacePromotion);
+    SetOperandAt(2, dominator);
+  }
+
+  bool IsAllocationFolded() { return (flags_ & ALLOCATION_FOLDED) != 0; }
+
   bool HandleSideEffectDominator(GVNFlag side_effect,
                                  HValue* dominator) override;
 
@@ -5026,23 +5012,19 @@
     ALLOCATE_IN_OLD_SPACE = 1 << 2,
     ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
     PREFILL_WITH_FILLER = 1 << 4,
-    CLEAR_NEXT_MAP_WORD = 1 << 5
+    ALLOCATION_FOLDING_DOMINATOR = 1 << 5,
+    ALLOCATION_FOLDED = 1 << 6
   };
 
-  HAllocate(HValue* context,
-            HValue* size,
-            HType type,
-            PretenureFlag pretenure_flag,
-            InstanceType instance_type,
-            Handle<AllocationSite> allocation_site =
-                Handle<AllocationSite>::null())
-      : HTemplateInstruction<2>(type),
-        flags_(ComputeFlags(pretenure_flag, instance_type)),
-        dominating_allocate_(NULL),
-        filler_free_space_size_(NULL),
-        size_upper_bound_(NULL) {
+  HAllocate(
+      HValue* context, HValue* size, HType type, PretenureFlag pretenure_flag,
+      InstanceType instance_type, HValue* dominator,
+      Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null())
+      : HTemplateInstruction<3>(type),
+        flags_(ComputeFlags(pretenure_flag, instance_type)) {
     SetOperandAt(0, context);
     UpdateSize(size);
+    SetOperandAt(2, dominator);
     set_representation(Representation::Tagged());
     SetFlag(kTrackSideEffectDominators);
     SetChangesFlag(kNewSpacePromotion);
@@ -5072,46 +5054,20 @@
     if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
       flags = static_cast<Flags>(flags | PREFILL_WITH_FILLER);
     }
-    if (pretenure_flag == NOT_TENURED &&
-        AllocationSite::CanTrack(instance_type)) {
-      flags = static_cast<Flags>(flags | CLEAR_NEXT_MAP_WORD);
-    }
     return flags;
   }
 
-  void UpdateClearNextMapWord(bool clear_next_map_word) {
-    flags_ = static_cast<Flags>(clear_next_map_word
-                                ? flags_ | CLEAR_NEXT_MAP_WORD
-                                : flags_ & ~CLEAR_NEXT_MAP_WORD);
-  }
-
   void UpdateSize(HValue* size) {
     SetOperandAt(1, size);
-    if (size->IsInteger32Constant()) {
-      size_upper_bound_ = HConstant::cast(size);
-    } else {
-      size_upper_bound_ = NULL;
-    }
   }
 
-  HAllocate* GetFoldableDominator(HAllocate* dominator);
-
-  void UpdateFreeSpaceFiller(int32_t filler_size);
-
-  void CreateFreeSpaceFiller(int32_t filler_size);
-
   bool IsFoldable(HAllocate* allocate) {
     return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
            (IsOldSpaceAllocation() && allocate->IsOldSpaceAllocation());
   }
 
-  void ClearNextMapWord(int offset);
-
   Flags flags_;
   Handle<Map> known_initial_map_;
-  HAllocate* dominating_allocate_;
-  HStoreNamedField* filler_free_space_size_;
-  HConstant* size_upper_bound_;
 };
 
 
@@ -5183,9 +5139,20 @@
 inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
                                             HValue* value,
                                             HValue* dominator) {
+  // There may be multiple inner allocates dominated by one allocate.
   while (object->IsInnerAllocatedObject()) {
     object = HInnerAllocatedObject::cast(object)->base_object();
   }
+
+  if (object->IsAllocate()) {
+    HAllocate* allocate = HAllocate::cast(object);
+    if (allocate->IsAllocationFolded()) {
+      HValue* dominator = allocate->allocation_folding_dominator();
+      DCHECK(HAllocate::cast(dominator)->IsAllocationFoldingDominator());
+      object = dominator;
+    }
+  }
+
   if (object->IsConstant() &&
       HConstant::cast(object)->HasExternalReferenceValue()) {
     // Stores to external references require no write barriers
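
The added lookup matters for write-barrier elision: stores into a freshly
allocated new-space object need no barrier, but for a folded allocation that
decision belongs to the dominator that owns the memory. Restated as a
standalone helper using only accessors from this patch; the helper name is
illustrative:

    HValue* ResolveAllocation(HValue* object) {
      while (object->IsInnerAllocatedObject()) {
        object = HInnerAllocatedObject::cast(object)->base_object();
      }
      if (object->IsAllocate() &&
          HAllocate::cast(object)->IsAllocationFolded()) {
        object = HAllocate::cast(object)->allocation_folding_dominator();
      }
      return object;
    }
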
@@ -5226,10 +5193,7 @@
     // hole value. This is used for checking for loading of uninitialized
     // harmony bindings where we deoptimize into full-codegen generated code
     // which will subsequently throw a reference error.
-    kCheckDeoptimize,
-    // Load and check the value of the context slot. Return undefined if it's
-    // the hole value. This is used for non-harmony const assignments
-    kCheckReturnUndefined
+    kCheckDeoptimize
   };
 
   HLoadContextSlot(HValue* context, int slot_index, Mode mode)
@@ -5282,9 +5246,7 @@
     // hole value. This is used for checking for assignments to uninitialized
     // harmony bindings where we deoptimize into full-codegen generated code
     // which will subsequently throw a reference error.
-    kCheckDeoptimize,
-    // Check the previous value and ignore assignment if it isn't a hole value
-    kCheckIgnoreAssignment
+    kCheckDeoptimize
   };
 
   DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int,
diff --git a/src/crankshaft/hydrogen-range-analysis.h b/src/crankshaft/hydrogen-range-analysis.h
index cff7026..eeac690 100644
--- a/src/crankshaft/hydrogen-range-analysis.h
+++ b/src/crankshaft/hydrogen-range-analysis.h
@@ -5,6 +5,7 @@
 #ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
 #define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/crankshaft/hydrogen.h"
 
 namespace v8 {
@@ -21,7 +22,7 @@
   void Run();
 
  private:
-  void TraceRange(const char* msg, ...);
+  PRINTF_FORMAT(2, 3) void TraceRange(const char* msg, ...);
   void InferControlFlowRange(HCompareNumericAndBranch* test,
                              HBasicBlock* dest);
   void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
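
PRINTF_FORMAT comes from the newly included src/base/compiler-specific.h and
lets the compiler type-check the varargs against the format string, so a call
like TraceRange("%d", "str") becomes a compile-time warning. On GCC/Clang it
boils down to the standard format attribute; a representative definition only,
the real header differs in detail:

    #if defined(__GNUC__)
    #define PRINTF_FORMAT(format_param, dots_param) \
      __attribute__((format(printf, format_param, dots_param)))
    #else
    #define PRINTF_FORMAT(format_param, dots_param)
    #endif
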
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
index 0de6dac..9c5fa15 100644
--- a/src/crankshaft/hydrogen.cc
+++ b/src/crankshaft/hydrogen.cc
@@ -68,6 +68,177 @@
 namespace v8 {
 namespace internal {
 
+class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
+ public:
+  explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
+      : HOptimizedGraphBuilder(info) {}
+
+#define DEF_VISIT(type)                                      \
+  void Visit##type(type* node) override {                    \
+    SourcePosition old_position = SourcePosition::Unknown(); \
+    if (node->position() != RelocInfo::kNoPosition) {        \
+      old_position = source_position();                      \
+      SetSourcePosition(node->position());                   \
+    }                                                        \
+    HOptimizedGraphBuilder::Visit##type(node);               \
+    if (!old_position.IsUnknown()) {                         \
+      set_source_position(old_position);                     \
+    }                                                        \
+  }
+  EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type)                                      \
+  void Visit##type(type* node) override {                    \
+    SourcePosition old_position = SourcePosition::Unknown(); \
+    if (node->position() != RelocInfo::kNoPosition) {        \
+      old_position = source_position();                      \
+      SetSourcePosition(node->position());                   \
+    }                                                        \
+    HOptimizedGraphBuilder::Visit##type(node);               \
+    if (!old_position.IsUnknown()) {                         \
+      set_source_position(old_position);                     \
+    }                                                        \
+  }
+  STATEMENT_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type)                        \
+  void Visit##type(type* node) override {      \
+    HOptimizedGraphBuilder::Visit##type(node); \
+  }
+  DECLARATION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
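
Written out, each generated expression/statement visitor just brackets the
base-class visit with a save/restore of the builder's current source position;
declarations get a plain forwarding visitor. One expansion by hand, assuming
Block appears in STATEMENT_NODE_LIST (it is a standard AST statement node):

    void VisitBlock(Block* node) override {
      SourcePosition old_position = SourcePosition::Unknown();
      if (node->position() != RelocInfo::kNoPosition) {
        old_position = source_position();
        SetSourcePosition(node->position());  // positions nest with inlining
      }
      HOptimizedGraphBuilder::VisitBlock(node);
      if (!old_position.IsUnknown()) {
        set_source_position(old_position);    // restore on the way out
      }
    }
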
+
+HCompilationJob::Status HCompilationJob::CreateGraphImpl() {
+  bool dont_crankshaft = info()->shared_info()->dont_crankshaft();
+
+  // Optimization requires a version of fullcode with deoptimization support.
+  // Recompile the unoptimized version of the code if the current version
+  // doesn't have deoptimization support already.
+  // Otherwise, if we are gathering compilation time and space statistics
+  // for hydrogen, gather baseline statistics for a fullcode compilation.
+  bool should_recompile = !info()->shared_info()->has_deoptimization_support();
+  if (should_recompile || FLAG_hydrogen_stats) {
+    base::ElapsedTimer timer;
+    if (FLAG_hydrogen_stats) {
+      timer.Start();
+    }
+    if (!Compiler::EnsureDeoptimizationSupport(info())) {
+      return FAILED;
+    }
+    if (FLAG_hydrogen_stats) {
+      isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
+    }
+  }
+  DCHECK(info()->shared_info()->has_deoptimization_support());
+  DCHECK(!info()->shared_info()->never_compiled());
+
+  if (!isolate()->use_crankshaft() || dont_crankshaft) {
+    // Crankshaft is entirely disabled.
+    return FAILED;
+  }
+
+  // Check the whitelist for Crankshaft.
+  if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
+    return AbortOptimization(kHydrogenFilter);
+  }
+
+  Scope* scope = info()->scope();
+  if (LUnallocated::TooManyParameters(scope->num_parameters())) {
+    // Crankshaft would require too many Lithium operands.
+    return AbortOptimization(kTooManyParameters);
+  }
+
+  if (info()->is_osr() &&
+      LUnallocated::TooManyParametersOrStackSlots(scope->num_parameters(),
+                                                  scope->num_stack_slots())) {
+    // Crankshaft would require too many Lithium operands.
+    return AbortOptimization(kTooManyParametersLocals);
+  }
+
+  if (IsGeneratorFunction(info()->shared_info()->kind())) {
+    // Crankshaft does not support generators.
+    return AbortOptimization(kGenerator);
+  }
+
+  if (FLAG_trace_hydrogen) {
+    isolate()->GetHTracer()->TraceCompilation(info());
+  }
+
+  // Optimization could have been disabled by the parser. Note that this check
+  // is only needed because the Hydrogen graph builder is missing some bailouts.
+  if (info()->shared_info()->optimization_disabled()) {
+    return AbortOptimization(
+        info()->shared_info()->disable_optimization_reason());
+  }
+
+  HOptimizedGraphBuilder* graph_builder =
+      (info()->is_tracking_positions() || FLAG_trace_ic)
+          ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
+          : new (info()->zone()) HOptimizedGraphBuilder(info());
+
+  // Type-check the function.
+  AstTyper(info()->isolate(), info()->zone(), info()->closure(),
+           info()->scope(), info()->osr_ast_id(), info()->literal(),
+           graph_builder->bounds())
+      .Run();
+
+  graph_ = graph_builder->CreateGraph();
+
+  if (isolate()->has_pending_exception()) {
+    return FAILED;
+  }
+
+  if (graph_ == NULL) return FAILED;
+
+  if (info()->dependencies()->HasAborted()) {
+    // Dependency has changed during graph creation. Let's try again later.
+    return RetryOptimization(kBailedOutDueToDependencyChange);
+  }
+
+  return SUCCEEDED;
+}
+
+HCompilationJob::Status HCompilationJob::OptimizeGraphImpl() {
+  DCHECK(graph_ != NULL);
+  BailoutReason bailout_reason = kNoReason;
+
+  if (graph_->Optimize(&bailout_reason)) {
+    chunk_ = LChunk::NewChunk(graph_);
+    if (chunk_ != NULL) return SUCCEEDED;
+  } else if (bailout_reason != kNoReason) {
+    info()->AbortOptimization(bailout_reason);
+  }
+
+  return FAILED;
+}
+
+HCompilationJob::Status HCompilationJob::GenerateCodeImpl() {
+  DCHECK(chunk_ != NULL);
+  DCHECK(graph_ != NULL);
+  {
+    // Deferred handles reference objects that were accessible during
+    // graph creation.  To make sure that we don't encounter inconsistencies
+    // between graph creation and code generation, we disallow accessing
+    // objects through deferred handles during the latter, with exceptions.
+    DisallowDeferredHandleDereference no_deferred_handle_deref;
+    Handle<Code> optimized_code = chunk_->Codegen();
+    if (optimized_code.is_null()) {
+      if (info()->bailout_reason() == kNoReason) {
+        return AbortOptimization(kCodeGenerationFailed);
+      }
+      return FAILED;
+    }
+    RegisterWeakObjectsInOptimizedCode(optimized_code);
+    info()->SetCode(optimized_code);
+  }
+  // Add to the weak list of optimized code objects.
+  info()->context()->native_context()->AddOptimizedCode(*info()->code());
+  return SUCCEEDED;
+}
+
 HBasicBlock::HBasicBlock(HGraph* graph)
     : block_id_(graph->GetNextBlockID()),
       graph_(graph),
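
The hunk above puts Crankshaft behind the new three-phase HCompilationJob: graph creation, graph optimization, and code generation each return a status so the caller can stop at the first failure or retry later (RetryOptimization). A minimal sketch of that driver protocol, with illustrative names rather than the real CompilationJob interface:

// Sketch only: a three-phase job in the style of the code above. The names
// (ThreePhaseJob, RunToCompletion) are stand-ins, not V8 API.
enum Status { SUCCEEDED, FAILED };

struct ThreePhaseJob {
  virtual Status CreateGraphImpl() = 0;
  virtual Status OptimizeGraphImpl() = 0;
  virtual Status GenerateCodeImpl() = 0;
  virtual ~ThreePhaseJob() {}

  // Later phases run only if earlier ones succeeded, mirroring how the
  // Crankshaft phases above bail out with FAILED or AbortOptimization().
  Status RunToCompletion() {
    if (CreateGraphImpl() != SUCCEEDED) return FAILED;
    if (OptimizeGraphImpl() != SUCCEEDED) return FAILED;
    return GenerateCodeImpl();
  }
};
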
@@ -1188,6 +1359,9 @@
 HGraph* HGraphBuilder::CreateGraph() {
   graph_ = new (zone()) HGraph(info_, descriptor_);
   if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
+  if (!info_->IsStub() && info_->is_tracking_positions()) {
+    TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown());
+  }
   CompilationPhase phase("H_Block building", info_);
   set_current_block(graph()->entry_block());
   if (!BuildGraph()) return NULL;
@@ -1195,6 +1369,52 @@
   return graph_;
 }
 
+int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+                                        SourcePosition position) {
+  DCHECK(info_->is_tracking_positions());
+
+  int inline_id = static_cast<int>(graph()->inlined_function_infos().size());
+  HInlinedFunctionInfo info(shared->start_position());
+  if (!shared->script()->IsUndefined()) {
+    Handle<Script> script(Script::cast(shared->script()));
+
+    if (FLAG_hydrogen_track_positions && !script->source()->IsUndefined()) {
+      CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+      Object* source_name = script->name();
+      OFStream os(tracing_scope.file());
+      os << "--- FUNCTION SOURCE (";
+      if (source_name->IsString()) {
+        os << String::cast(source_name)->ToCString().get() << ":";
+      }
+      os << shared->DebugName()->ToCString().get() << ") id{";
+      os << info_->optimization_id() << "," << inline_id << "} ---\n";
+      {
+        DisallowHeapAllocation no_allocation;
+        int start = shared->start_position();
+        int len = shared->end_position() - start;
+        String::SubStringRange source(String::cast(script->source()), start,
+                                      len);
+        for (const auto& c : source) {
+          os << AsReversiblyEscapedUC16(c);
+        }
+      }
+
+      os << "\n--- END ---\n";
+    }
+  }
+
+  graph()->inlined_function_infos().push_back(info);
+
+  if (FLAG_hydrogen_track_positions && inline_id != 0) {
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
+    os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
+       << info_->optimization_id() << "," << inline_id << "} AS " << inline_id
+       << " AT " << position << std::endl;
+  }
+
+  return inline_id;
+}
 
 HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
   DCHECK(current_block() != NULL);
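
TraceInlinedFunction assigns each traced function an id equal to the current size of the inlined_function_infos() table, so id 0 is always the outermost function registered from CreateGraph(). Under --hydrogen-track-positions the output for a hypothetical script demo.js that inlines add() as id 1 looks roughly like this (file and function names are made up, and the trailing position is whatever SourcePosition's stream operator prints):

--- FUNCTION SOURCE (demo.js:add) id{0,1} ---
function add(a, b) { return a + b; }
--- END ---
INLINE (add) id{0,1} AS 1 AT <position>
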
@@ -1822,7 +2042,7 @@
   // Allocate the JSIteratorResult object.
   HValue* result =
       Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
-                     NOT_TENURED, JS_OBJECT_TYPE);
+                     NOT_TENURED, JS_OBJECT_TYPE, graph()->GetConstant0());
 
   // Initialize the JSIteratorResult object.
   HValue* native_context = BuildGetNativeContext();
@@ -1859,9 +2079,9 @@
   HValue* size = BuildCalculateElementsSize(elements_kind, length);
 
   // Allocate the JSRegExpResult and the FixedArray in one step.
-  HValue* result = Add<HAllocate>(
-      Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
-      NOT_TENURED, JS_ARRAY_TYPE);
+  HValue* result =
+      Add<HAllocate>(Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
+                     NOT_TENURED, JS_ARRAY_TYPE, graph()->GetConstant0());
 
   // Initialize the JSRegExpResult header.
   HValue* native_context = Add<HLoadNamedField>(
@@ -1895,12 +2115,6 @@
   HAllocate* elements = BuildAllocateElements(elements_kind, size);
   BuildInitializeElementsHeader(elements, elements_kind, length);
 
-  if (!elements->has_size_upper_bound()) {
-    HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
-        elements_kind, max_length->Integer32Value());
-    elements->set_size_upper_bound(size_in_bytes_upper_bound);
-  }
-
   Add<HStoreNamedField>(
       result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
       elements);
@@ -2062,9 +2276,8 @@
   Callable callable = CodeFactory::ToNumber(isolate());
   HValue* stub = Add<HConstant>(callable.code());
   HValue* values[] = {context(), input};
-  HCallWithDescriptor* instr =
-      Add<HCallWithDescriptor>(stub, 0, callable.descriptor(),
-                               Vector<HValue*>(values, arraysize(values)));
+  HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
+      stub, 0, callable.descriptor(), ArrayVector(values));
   instr->set_type(HType::TaggedNumber());
   return instr;
 }
@@ -2177,8 +2390,8 @@
 
   // Perform the actual allocation.
   HAllocate* object = Add<HAllocate>(
-      size, type, allocation_mode.GetPretenureMode(),
-      instance_type, allocation_mode.feedback_site());
+      size, type, allocation_mode.GetPretenureMode(), instance_type,
+      graph()->GetConstant0(), allocation_mode.feedback_site());
 
   // Setup the allocation memento.
   if (allocation_mode.CreateAllocationMementos()) {
@@ -2673,7 +2886,6 @@
       return array_builder->AllocateEmptyArray();
     } else {
       return array_builder->AllocateArray(length_argument,
-                                          array_length,
                                           length_argument);
     }
   }
@@ -2706,7 +2918,7 @@
   // Figure out total size
   HValue* length = Pop();
   HValue* capacity = Pop();
-  return array_builder->AllocateArray(capacity, max_alloc_length, length);
+  return array_builder->AllocateArray(capacity, length);
 }
 
 
@@ -2738,8 +2950,8 @@
     base_size += AllocationMemento::kSize;
   }
   HConstant* size_in_bytes = Add<HConstant>(base_size);
-  return Add<HAllocate>(
-      size_in_bytes, HType::JSArray(), NOT_TENURED, JS_OBJECT_TYPE);
+  return Add<HAllocate>(size_in_bytes, HType::JSArray(), NOT_TENURED,
+                        JS_OBJECT_TYPE, graph()->GetConstant0());
 }
 
 
@@ -2761,7 +2973,7 @@
       : FIXED_ARRAY_TYPE;
 
   return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED,
-                        instance_type);
+                        instance_type, graph()->GetConstant0());
 }
 
 
@@ -3149,14 +3361,6 @@
 
   HAllocate* elements = BuildAllocateElements(kind, elements_size);
 
-  // This function implicitly relies on the fact that the
-  // FastCloneShallowArrayStub is called only for literals shorter than
-  // JSArray::kInitialMaxFastElementArray.
-  // Can't add HBoundsCheck here because otherwise the stub will eager a frame.
-  HConstant* size_upper_bound = EstablishElementsAllocationSize(
-      kind, JSArray::kInitialMaxFastElementArray);
-  elements->set_size_upper_bound(size_upper_bound);
-
   Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
 
   // The allocation for the cloned array above causes register pressure on
@@ -3396,42 +3600,12 @@
 HAllocate* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
   HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
   return AllocateArray(capacity,
-                       capacity,
                        builder()->graph()->GetConstant0());
 }
 
 
 HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
     HValue* capacity,
-    HConstant* capacity_upper_bound,
-    HValue* length_field,
-    FillMode fill_mode) {
-  return AllocateArray(capacity,
-                       capacity_upper_bound->GetInteger32Constant(),
-                       length_field,
-                       fill_mode);
-}
-
-
-HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
-    HValue* capacity,
-    int capacity_upper_bound,
-    HValue* length_field,
-    FillMode fill_mode) {
-  HConstant* elememts_size_upper_bound = capacity->IsInteger32Constant()
-      ? HConstant::cast(capacity)
-      : builder()->EstablishElementsAllocationSize(kind_, capacity_upper_bound);
-
-  HAllocate* array = AllocateArray(capacity, length_field, fill_mode);
-  if (!elements_location_->has_size_upper_bound()) {
-    elements_location_->set_size_upper_bound(elememts_size_upper_bound);
-  }
-  return array;
-}
-
-
-HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
-    HValue* capacity,
     HValue* length_field,
     FillMode fill_mode) {
   // These HForceRepresentations are because we store these as fields in the
@@ -3506,7 +3680,8 @@
       break_scope_(NULL),
       inlined_count_(0),
       globals_(10, info->zone()),
-      osr_(new (info->zone()) HOsrBuilder(this)) {
+      osr_(new (info->zone()) HOsrBuilder(this)),
+      bounds_(info->zone()) {
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
   // to know it's the initial state.
@@ -3599,7 +3774,6 @@
   return os << "B" << b.block_id();
 }
 
-
 HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
     : isolate_(info->isolate()),
       next_block_id_(0),
@@ -3612,21 +3786,19 @@
       info_(info),
       descriptor_(descriptor),
       zone_(info->zone()),
+      allow_code_motion_(false),
       use_optimistic_licm_(false),
       depends_on_empty_array_proto_elements_(false),
       type_change_checksum_(0),
       maximum_environment_size_(0),
       no_side_effects_scope_count_(0),
-      disallow_adding_new_values_(false) {
+      disallow_adding_new_values_(false),
+      inlined_function_infos_(info->zone()) {
   if (info->IsStub()) {
     // For stubs, explicitly add the context to the environment.
     start_environment_ = new (zone_)
         HEnvironment(zone_, descriptor.GetRegisterParameterCount() + 1);
   } else {
-    if (info->is_tracking_positions()) {
-      info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown(),
-                                 InlinedFunctionInfo::kNoParentId);
-    }
     start_environment_ =
         new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
   }
@@ -3655,7 +3827,8 @@
 
 int HGraph::SourcePositionToScriptPosition(SourcePosition pos) {
   return (FLAG_hydrogen_track_positions && !pos.IsUnknown())
-             ? info()->start_position_for(pos.inlining_id()) + pos.position()
+             ? inlined_function_infos_.at(pos.inlining_id()).start_position +
+                   pos.position()
              : pos.raw();
 }
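
SourcePositionToScriptPosition now resolves a tracked position against the graph's own side table instead of asking CompilationInfo. The mapping is a lookup plus an add; a self-contained sketch with a stand-in for HInlinedFunctionInfo:

#include <vector>

struct InlinedInfo { int start_position; };  // stand-in for HInlinedFunctionInfo

// E.g. if the function registered under inlining_id 2 starts at script
// offset 120 and the tracked position is 15, the result is 135.
int ToScriptPosition(const std::vector<InlinedInfo>& infos, int inlining_id,
                     int position) {
  return infos.at(inlining_id).start_position + position;
}
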
 
@@ -4445,6 +4618,11 @@
       !type_info->matches_inlined_type_change_checksum(composite_checksum));
   type_info->set_inlined_type_change_checksum(composite_checksum);
 
+  // Set this predicate early to avoid handle deref during graph optimization.
+  graph()->set_allow_code_motion(
+      current_info()->IsStub() ||
+      current_info()->shared_info()->opt_count() + 1 < FLAG_max_opt_count);
+
   // Perform any necessary OSR-specific cleanups or changes to the graph.
   osr()->FinishGraph();
 
@@ -4991,7 +5169,7 @@
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   Add<HSimulate>(stmt->EntryId());
   HValue* tag_value = Top();
-  Type* tag_type = stmt->tag()->bounds().lower;
+  Type* tag_type = bounds_.get(stmt->tag()).lower;
 
   // 1. Build all the tests, with dangling true branches
   BailoutId default_id = BailoutId::None();
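
The bounds_.get(...) calls in this and the following hunks reflect a broader change in this merge: AST type bounds are no longer stored on each AST node but in an AstTypeBounds side table owned by the graph builder and filled in by AstTyper. A minimal sketch of such a side table, with Node and Bounds as stand-ins for the real AST and type classes (the real table is zone-allocated):

#include <unordered_map>

struct Node {};
struct Bounds { int lower, upper; };  // stand-in; V8 stores Type* pairs

class TypeBoundsTable {
 public:
  void set(const Node* node, Bounds bounds) { map_[node] = bounds; }
  Bounds get(const Node* node) const {
    auto it = map_.find(node);
    return it != map_.end() ? it->second : Bounds{0, 0};
  }

 private:
  std::unordered_map<const Node*, Bounds> map_;
};
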
@@ -5008,7 +5186,7 @@
     if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
     HValue* label_value = Pop();
 
-    Type* label_type = clause->label()->bounds().lower;
+    Type* label_type = bounds_.get(clause->label()).lower;
     Type* combined_type = clause->compare_type();
     HControlInstruction* compare = BuildCompareInstruction(
         Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
@@ -5500,8 +5678,8 @@
     FastNewClosureDescriptor descriptor(isolate());
     HValue* values[] = {context(), shared_info_value};
     HConstant* stub_value = Add<HConstant>(stub.GetCode());
-    instr = New<HCallWithDescriptor>(
-        stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
+    instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
+                                     ArrayVector(values));
   } else {
     Add<HPushArguments>(shared_info_value);
     Runtime::FunctionId function_id =
@@ -5593,6 +5771,7 @@
       return kUseGeneric;
     case LookupIterator::DATA:
       if (access_type == STORE && it->IsReadOnly()) return kUseGeneric;
+      if (!it->GetHolder<JSObject>()->IsJSGlobalObject()) return kUseGeneric;
       return kUseCell;
     case LookupIterator::JSPROXY:
     case LookupIterator::TRANSITION:
@@ -5746,9 +5925,6 @@
         case CONST:
           mode = HLoadContextSlot::kCheckDeoptimize;
           break;
-        case CONST_LEGACY:
-          mode = HLoadContextSlot::kCheckReturnUndefined;
-          break;
         default:
           mode = HLoadContextSlot::kNoCheck;
           break;
@@ -5782,9 +5958,8 @@
       context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
       Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
   HConstant* stub_value = Add<HConstant>(callable.code());
-  HInstruction* instr =
-      New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
-                               Vector<HValue*>(values, arraysize(values)));
+  HInstruction* instr = New<HCallWithDescriptor>(
+      stub_value, 0, callable.descriptor(), ArrayVector(values));
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
@@ -5995,59 +6170,29 @@
   Handle<AllocationSite> site;
   Handle<LiteralsArray> literals(environment()->closure()->literals(),
                                  isolate());
-  bool uninitialized = false;
   Handle<Object> literals_cell(literals->literal(expr->literal_index()),
                                isolate());
   Handle<JSObject> boilerplate_object;
-  if (literals_cell->IsUndefined()) {
-    uninitialized = true;
-    Handle<Object> raw_boilerplate;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate(), raw_boilerplate,
-        Runtime::CreateArrayLiteralBoilerplate(isolate(), literals,
-                                               expr->constant_elements()),
-        Bailout(kArrayBoilerplateCreationFailed));
-
-    boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
-    AllocationSiteCreationContext creation_context(isolate());
-    site = creation_context.EnterNewScope();
-    if (JSObject::DeepWalk(boilerplate_object, &creation_context).is_null()) {
-      return Bailout(kArrayBoilerplateCreationFailed);
-    }
-    creation_context.ExitScope(site, boilerplate_object);
-    literals->set_literal(expr->literal_index(), *site);
-
-    if (boilerplate_object->elements()->map() ==
-        isolate()->heap()->fixed_cow_array_map()) {
-      isolate()->counters()->cow_arrays_created_runtime()->Increment();
-    }
-  } else {
+  if (!literals_cell->IsUndefined()) {
     DCHECK(literals_cell->IsAllocationSite());
     site = Handle<AllocationSite>::cast(literals_cell);
     boilerplate_object = Handle<JSObject>(
         JSObject::cast(site->transition_info()), isolate());
   }
 
-  DCHECK(!boilerplate_object.is_null());
-  DCHECK(site->SitePointsToLiteral());
-
-  ElementsKind boilerplate_elements_kind =
-      boilerplate_object->GetElementsKind();
-
   // Check whether to use fast or slow deep-copying for boilerplate.
   int max_properties = kMaxFastLiteralProperties;
-  if (IsFastLiteral(boilerplate_object,
-                    kMaxFastLiteralDepth,
+  if (!boilerplate_object.is_null() &&
+      IsFastLiteral(boilerplate_object, kMaxFastLiteralDepth,
                     &max_properties)) {
+    DCHECK(site->SitePointsToLiteral());
     AllocationSiteUsageContext site_context(isolate(), site, false);
     site_context.EnterNewScope();
     literal = BuildFastLiteral(boilerplate_object, &site_context);
     site_context.ExitScope(site, boilerplate_object);
   } else {
     NoObservableSideEffectsScope no_effects(this);
-    // Boilerplate already exists and constant elements are never accessed,
-    // pass an empty fixed array to the runtime function instead.
-    Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
+    Handle<FixedArray> constants = expr->constant_elements();
     int literal_index = expr->literal_index();
     int flags = expr->ComputeFlags(true);
 
@@ -6058,7 +6203,9 @@
     literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
 
     // Register to deopt if the boilerplate ElementsKind changes.
-    top_info()->dependencies()->AssumeTransitionStable(site);
+    if (!site.is_null()) {
+      top_info()->dependencies()->AssumeTransitionStable(site);
+    }
   }
 
   // The array is expected in the bailout environment during computation
@@ -6083,21 +6230,28 @@
 
     HValue* key = Add<HConstant>(i);
 
-    switch (boilerplate_elements_kind) {
-      case FAST_SMI_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS: {
-        HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value, nullptr,
-                                              boilerplate_elements_kind);
-        instr->SetUninitialized(uninitialized);
-        break;
+    if (!boilerplate_object.is_null()) {
+      ElementsKind boilerplate_elements_kind =
+          boilerplate_object->GetElementsKind();
+      switch (boilerplate_elements_kind) {
+        case FAST_SMI_ELEMENTS:
+        case FAST_HOLEY_SMI_ELEMENTS:
+        case FAST_ELEMENTS:
+        case FAST_HOLEY_ELEMENTS:
+        case FAST_DOUBLE_ELEMENTS:
+        case FAST_HOLEY_DOUBLE_ELEMENTS: {
+          Add<HStoreKeyed>(elements, key, value, nullptr,
+                           boilerplate_elements_kind);
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
       }
-      default:
-        UNREACHABLE();
-        break;
+    } else {
+      HInstruction* instr = BuildKeyedGeneric(
+          STORE, expr, expr->LiteralFeedbackSlot(), literal, key, value);
+      AddInstruction(instr);
     }
 
     Add<HSimulate>(expr->GetIdForElement(i));
@@ -6176,10 +6330,9 @@
       HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
 
       // TODO(hpayer): Allocation site pretenuring support.
-      HInstruction* heap_number = Add<HAllocate>(heap_number_size,
-          HType::HeapObject(),
-          NOT_TENURED,
-          MUTABLE_HEAP_NUMBER_TYPE);
+      HInstruction* heap_number =
+          Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
+                         MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
       AddStoreMapConstant(
           heap_number, isolate()->factory()->mutable_heap_number_map());
       Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@@ -6761,7 +6914,6 @@
       // possible if the receiver had a known map at some point, and no
       // map-changing stores have happened to it since.
       Handle<Map> candidate_map = receiver->GetMonomorphicJSObjectMap();
-      if (candidate_map->is_observed()) return false;
       for (HInstruction* current = builder->current_block()->last();
            current != nullptr; current = current->previous()) {
         if (current->IsBlockEntry()) break;
@@ -7026,7 +7178,11 @@
           case CONST:
             return Bailout(kNonInitializerAssignmentToConst);
           case CONST_LEGACY:
-            return ast_context()->ReturnValue(Pop());
+            if (is_strict(function_language_mode())) {
+              return Bailout(kNonInitializerAssignmentToConst);
+            } else {
+              return ast_context()->ReturnValue(Pop());
+            }
           default:
             mode = HStoreContextSlot::kNoCheck;
         }
@@ -7095,10 +7251,16 @@
       }
     } else if (var->mode() == CONST_LEGACY) {
       if (expr->op() != Token::INIT) {
-        CHECK_ALIVE(VisitForValue(expr->value()));
-        return ast_context()->ReturnValue(Pop());
+        if (is_strict(function_language_mode())) {
+          return Bailout(kNonInitializerAssignmentToConst);
+        } else {
+          CHECK_ALIVE(VisitForValue(expr->value()));
+          return ast_context()->ReturnValue(Pop());
+        }
       }
 
+      // TODO(adamk): Is this required? Legacy const variables are always
+      // initialized before use.
       if (var->IsStackAllocated()) {
         // We insert a use of the old value to detect unsupported uses of const
         // variables (e.g. initialization inside a loop).
@@ -7170,11 +7332,7 @@
           }
         } else {
           DCHECK_EQ(Token::INIT, expr->op());
-          if (var->mode() == CONST_LEGACY) {
-            mode = HStoreContextSlot::kCheckIgnoreAssignment;
-          } else {
-            mode = HStoreContextSlot::kNoCheck;
-          }
+          mode = HStoreContextSlot::kNoCheck;
         }
 
         HValue* context = BuildContextChainWalk(var);
@@ -7546,9 +7704,13 @@
   // Get transition target for each map (NULL == no transition).
   for (int i = 0; i < maps->length(); ++i) {
     Handle<Map> map = maps->at(i);
-    Handle<Map> transitioned_map =
-        Map::FindTransitionedMap(map, &possible_transitioned_maps);
-    transition_target.Add(transitioned_map);
+    Map* transitioned_map =
+        map->FindElementsKindTransitionedMap(&possible_transitioned_maps);
+    if (transitioned_map != nullptr) {
+      transition_target.Add(handle(transitioned_map));
+    } else {
+      transition_target.Add(Handle<Map>());
+    }
   }
 
   MapHandleList untransitionable_maps(maps->length());
@@ -8025,7 +8187,7 @@
   HConstant* stub = Add<HConstant>(callable.code());
 
   return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
-                                  Vector<HValue*>(op_vals, arraysize(op_vals)),
+                                  ArrayVector(op_vals),
                                   syntactic_tail_call_mode);
 }
 
@@ -8050,7 +8212,7 @@
   HConstant* stub = Add<HConstant>(callable.code());
 
   return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
-                                  Vector<HValue*>(op_vals, arraysize(op_vals)),
+                                  ArrayVector(op_vals),
                                   syntactic_tail_call_mode);
 }
 
@@ -8402,7 +8564,7 @@
       top_info()->parse_info()->ast_value_factory());
   parse_info.set_ast_value_factory_owned(false);
 
-  CompilationInfo target_info(&parse_info);
+  CompilationInfo target_info(&parse_info, target);
   Handle<SharedFunctionInfo> target_shared(target->shared());
 
   if (inlining_kind != CONSTRUCT_CALL_RETURN &&
@@ -8424,6 +8586,10 @@
     TraceInline(target, caller, "parse failure");
     return false;
   }
+  if (target_shared->dont_crankshaft()) {
+    TraceInline(target, caller, "ParseAndAnalyze found incompatibility");
+    return false;
+  }
 
   if (target_info.scope()->num_heap_slots() > 0) {
     TraceInline(target, caller, "target has context-allocated variables");
@@ -8496,13 +8662,13 @@
   // Type-check the inlined function.
   DCHECK(target_shared->has_deoptimization_support());
   AstTyper(target_info.isolate(), target_info.zone(), target_info.closure(),
-           target_info.scope(), target_info.osr_ast_id(), target_info.literal())
+           target_info.scope(), target_info.osr_ast_id(), target_info.literal(),
+           &bounds_)
       .Run();
 
   int inlining_id = 0;
   if (top_info()->is_tracking_positions()) {
-    inlining_id = top_info()->TraceInlinedFunction(
-        target_shared, source_position(), function_state()->inlining_id());
+    inlining_id = TraceInlinedFunction(target_shared, source_position());
   }
 
   // Save the pending call context. Set up new one for the inlined function.
@@ -8767,8 +8933,7 @@
   return !receiver_map.is_null() && receiver_map->prototype()->IsJSObject() &&
          receiver_map->instance_type() == JS_ARRAY_TYPE &&
          IsFastElementsKind(receiver_map->elements_kind()) &&
-         !receiver_map->is_dictionary_map() && !receiver_map->is_observed() &&
-         receiver_map->is_extensible() &&
+         !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
          (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
          !IsReadOnlyLengthDescriptor(receiver_map);
 }
@@ -9145,7 +9310,6 @@
       if (!receiver_map->prototype()->IsJSObject()) return false;
       ElementsKind kind = receiver_map->elements_kind();
       if (!IsFastElementsKind(kind)) return false;
-      if (receiver_map->is_observed()) return false;
       if (argument_count != 2) return false;
       if (!receiver_map->is_extensible()) return false;
 
@@ -10091,9 +10255,8 @@
   Callable callable = CodeFactory::Construct(isolate());
   HConstant* stub = Add<HConstant>(callable.code());
   PushArgumentsFromEnvironment(argument_count);
-  HInstruction* construct =
-      New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
-                               Vector<HValue*>(op_vals, arraysize(op_vals)));
+  HInstruction* construct = New<HCallWithDescriptor>(
+      stub, argument_count, callable.descriptor(), ArrayVector(op_vals));
   return ast_context()->ReturnInstruction(construct, expr->id());
 }
 
@@ -10202,7 +10365,8 @@
   length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
   HValue* elements = Add<HAllocate>(
       Add<HConstant>(FixedTypedArrayBase::kHeaderSize), HType::HeapObject(),
-      NOT_TENURED, external_array_map->instance_type());
+      NOT_TENURED, external_array_map->instance_type(),
+      graph()->GetConstant0());
 
   AddStoreMapConstant(elements, external_array_map);
   Add<HStoreNamedField>(elements,
@@ -10258,9 +10422,9 @@
   length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
   Handle<Map> fixed_typed_array_map(
       isolate()->heap()->MapForFixedTypedArray(array_type));
-  HAllocate* elements =
-      Add<HAllocate>(total_size, HType::HeapObject(), NOT_TENURED,
-                     fixed_typed_array_map->instance_type());
+  HAllocate* elements = Add<HAllocate>(
+      total_size, HType::HeapObject(), NOT_TENURED,
+      fixed_typed_array_map->instance_type(), graph()->GetConstant0());
 
 #ifndef V8_HOST_ARCH_64_BIT
   if (array_type == kExternalFloat64Array) {
@@ -10972,9 +11136,9 @@
     HValue* left,
     HValue* right,
     PushBeforeSimulateBehavior push_sim_result) {
-  Type* left_type = expr->left()->bounds().lower;
-  Type* right_type = expr->right()->bounds().lower;
-  Type* result_type = expr->bounds().lower;
+  Type* left_type = bounds_.get(expr->left()).lower;
+  Type* right_type = bounds_.get(expr->right()).lower;
+  Type* result_type = bounds_.get(expr).lower;
   Maybe<int> fixed_right_arg = expr->fixed_right_arg();
   Handle<AllocationSite> allocation_site = expr->allocation_site();
 
@@ -11159,46 +11323,53 @@
   // inline several instructions (including the two pushes) for every tagged
   // operation in optimized code, which is more expensive than a stub call.
   if (graph()->info()->IsStub() && is_non_primitive) {
-    Runtime::FunctionId function_id;
+    HValue* values[] = {context(), left, right};
+#define GET_STUB(Name)                                                       \
+  do {                                                                       \
+    Callable callable = CodeFactory::Name(isolate());                        \
+    HValue* stub = Add<HConstant>(callable.code());                          \
+    instr = AddUncasted<HCallWithDescriptor>(stub, 0, callable.descriptor(), \
+                                             ArrayVector(values));           \
+  } while (false)
+
     switch (op) {
       default:
         UNREACHABLE();
       case Token::ADD:
-        function_id = Runtime::kAdd;
+        GET_STUB(Add);
         break;
       case Token::SUB:
-        function_id = Runtime::kSubtract;
+        GET_STUB(Subtract);
         break;
       case Token::MUL:
-        function_id = Runtime::kMultiply;
+        GET_STUB(Multiply);
         break;
       case Token::DIV:
-        function_id = Runtime::kDivide;
+        GET_STUB(Divide);
         break;
       case Token::MOD:
-        function_id = Runtime::kModulus;
+        GET_STUB(Modulus);
         break;
       case Token::BIT_OR:
-        function_id = Runtime::kBitwiseOr;
+        GET_STUB(BitwiseOr);
         break;
       case Token::BIT_AND:
-        function_id = Runtime::kBitwiseAnd;
+        GET_STUB(BitwiseAnd);
         break;
       case Token::BIT_XOR:
-        function_id = Runtime::kBitwiseXor;
+        GET_STUB(BitwiseXor);
         break;
       case Token::SAR:
-        function_id = Runtime::kShiftRight;
+        GET_STUB(ShiftRight);
         break;
       case Token::SHR:
-        function_id = Runtime::kShiftRightLogical;
+        GET_STUB(ShiftRightLogical);
         break;
       case Token::SHL:
-        function_id = Runtime::kShiftLeft;
+        GET_STUB(ShiftLeft);
         break;
     }
-    Add<HPushArguments>(left, right);
-    instr = AddUncasted<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
+#undef GET_STUB
   } else {
     switch (op) {
       case Token::ADD:
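
The GET_STUB helper above swaps the per-operator runtime calls for code-stub calls. Its body is wrapped in do { ... } while (false) so that the multi-statement expansion behaves as a single statement and composes safely with the surrounding case arms. A generic illustration of the idiom:

#include <cstdio>

// Without the do/while wrapper, the second statement would escape the `if`
// and the `else` would fail to parse.
#define LOG_TWICE(msg)      \
  do {                      \
    std::printf("%s", msg); \
    std::printf("%s", msg); \
  } while (false)

void Demo(bool verbose) {
  if (verbose)
    LOG_TWICE("hello\n");  // expands to exactly one statement
  else
    std::printf("quiet\n");
}
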
@@ -11502,8 +11673,8 @@
     return ast_context()->ReturnControl(instr, expr->id());
   }
 
-  Type* left_type = expr->left()->bounds().lower;
-  Type* right_type = expr->right()->bounds().lower;
+  Type* left_type = bounds_.get(expr->left()).lower;
+  Type* right_type = bounds_.get(expr->right()).lower;
   Type* combined_type = expr->combined_type();
 
   CHECK_ALIVE(VisitForValue(expr->left()));
@@ -11520,18 +11691,24 @@
   }
 
   if (op == Token::INSTANCEOF) {
-    DCHECK(!FLAG_harmony_instanceof);
     // Check to see if the rhs of the instanceof is a known function.
     if (right->IsConstant() &&
         HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
-      Handle<JSFunction> constructor =
+      Handle<JSFunction> function =
           Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
-      if (constructor->IsConstructor() &&
-          !constructor->map()->has_non_instance_prototype()) {
-        JSFunction::EnsureHasInitialMap(constructor);
-        DCHECK(constructor->has_initial_map());
-        Handle<Map> initial_map(constructor->initial_map(), isolate());
+      // Make sure the prototype of {function} is the %FunctionPrototype%, and
+      // it already has a meaningful initial map (i.e. we constructed at least
+      // one instance using the constructor {function}).
+      // We can only use the fast case if @@hasInstance was not used so far.
+      if (function->has_initial_map() &&
+          function->map()->prototype() ==
+              function->native_context()->closure() &&
+          !function->map()->has_non_instance_prototype() &&
+          isolate()->IsHasInstanceLookupChainIntact()) {
+        Handle<Map> initial_map(function->initial_map(), isolate());
         top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
+        top_info()->dependencies()->AssumePropertyCell(
+            isolate()->factory()->has_instance_protector());
         HInstruction* prototype =
             Add<HConstant>(handle(initial_map->prototype(), isolate()));
         HHasInPrototypeChainAndBranch* result =
@@ -11540,13 +11717,21 @@
       }
     }
 
-    HInstanceOf* result = New<HInstanceOf>(left, right);
+    Callable callable = CodeFactory::InstanceOf(isolate());
+    HValue* stub = Add<HConstant>(callable.code());
+    HValue* values[] = {context(), left, right};
+    HCallWithDescriptor* result = New<HCallWithDescriptor>(
+        stub, 0, callable.descriptor(), ArrayVector(values));
+    result->set_type(HType::Boolean());
     return ast_context()->ReturnInstruction(result, expr->id());
 
   } else if (op == Token::IN) {
-    Add<HPushArguments>(left, right);
+    Callable callable = CodeFactory::HasProperty(isolate());
+    HValue* stub = Add<HConstant>(callable.code());
+    HValue* values[] = {context(), left, right};
     HInstruction* result =
-        New<HCallRuntime>(Runtime::FunctionForId(Runtime::kHasProperty), 2);
+        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+                                 Vector<HValue*>(values, arraysize(values)));
     return ast_context()->ReturnInstruction(result, expr->id());
   }
 
@@ -11827,8 +12012,9 @@
   }
   top_info()->dependencies()->AssumeTransitionStable(current_site);
 
-  HInstruction* object = Add<HAllocate>(
-      object_size_constant, type, pretenure_flag, instance_type, top_site);
+  HInstruction* object =
+      Add<HAllocate>(object_size_constant, type, pretenure_flag, instance_type,
+                     graph()->GetConstant0(), top_site);
 
   // If allocation folding reaches Page::kMaxRegularHeapObjectSize the
   // elements array may not get folded into the object. Hence, we set the
@@ -11869,7 +12055,8 @@
     InstanceType instance_type = boilerplate_object->HasFastDoubleElements()
         ? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
     object_elements = Add<HAllocate>(object_elements_size, HType::HeapObject(),
-                                     pretenure_flag, instance_type, top_site);
+                                     pretenure_flag, instance_type,
+                                     graph()->GetConstant0(), top_site);
     BuildEmitElements(boilerplate_object, elements, object_elements,
                       site_context);
     Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
@@ -11970,9 +12157,9 @@
       if (representation.IsDouble()) {
         // Allocate a HeapNumber box and store the value into it.
         HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
-        HInstruction* double_box =
-            Add<HAllocate>(heap_number_constant, HType::HeapObject(),
-                pretenure_flag, MUTABLE_HEAP_NUMBER_TYPE);
+        HInstruction* double_box = Add<HAllocate>(
+            heap_number_constant, HType::HeapObject(), pretenure_flag,
+            MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
         AddStoreMapConstant(double_box,
             isolate()->factory()->mutable_heap_number_map());
         // Unwrap the mutable heap number from the boilerplate.
@@ -12113,10 +12300,7 @@
     Handle<FixedArray> array =
        isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
     for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
-    int flags =
-        DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
-        DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
-        DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+    int flags = current_info()->GetDeclareGlobalsFlags();
     Add<HDeclareGlobals>(array, flags);
     globals_.Rewind(0);
   }
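
GetDeclareGlobalsFlags() centralizes the packing that used to be spelled out here with the DeclareGlobals*Flag encoders. Those encoders follow V8's BitField pattern, where each field owns a fixed bit range inside one integer. A self-contained sketch of the pattern (the layout below is illustrative, not the real one):

#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed >> kShift) & ((1u << kBits) - 1));
  }
};

using EvalFlag = BitField<bool, 0, 1>;    // illustrative layout
using NativeFlag = BitField<bool, 1, 1>;

uint32_t EncodeFlags(bool is_eval, bool is_native) {
  return EvalFlag::encode(is_eval) | NativeFlag::encode(is_native);
}
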
@@ -12128,14 +12312,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_.Add(variable->name(), zone());
-      globals_.Add(variable->binding_needs_init()
-                       ? isolate()->factory()->the_hole_value()
-                       : isolate()->factory()->undefined_value(), zone());
+      globals_.Add(isolate()->factory()->undefined_value(), zone());
       return;
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL:
@@ -12291,9 +12474,8 @@
     Callable callable = CodeFactory::ToInteger(isolate());
     HValue* stub = Add<HConstant>(callable.code());
     HValue* values[] = {context(), input};
-    HInstruction* result =
-        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
-                                 Vector<HValue*>(values, arraysize(values)));
+    HInstruction* result = New<HCallWithDescriptor>(
+        stub, 0, callable.descriptor(), ArrayVector(values));
     return ast_context()->ReturnInstruction(result, call->id());
   }
 }
@@ -12315,9 +12497,8 @@
     Callable callable = CodeFactory::ToName(isolate());
     HValue* stub = Add<HConstant>(callable.code());
     HValue* values[] = {context(), input};
-    HInstruction* result =
-        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
-                                 Vector<HValue*>(values, arraysize(values)));
+    HInstruction* result = New<HCallWithDescriptor>(
+        stub, 0, callable.descriptor(), ArrayVector(values));
     return ast_context()->ReturnInstruction(result, call->id());
   }
 }
@@ -12342,9 +12523,8 @@
     Callable callable = CodeFactory::ToString(isolate());
     HValue* stub = Add<HConstant>(callable.code());
     HValue* values[] = {context(), input};
-    HInstruction* result =
-        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
-                                 Vector<HValue*>(values, arraysize(values)));
+    HInstruction* result = New<HCallWithDescriptor>(
+        stub, 0, callable.descriptor(), ArrayVector(values));
     return ast_context()->ReturnInstruction(result, call->id());
   }
 }
@@ -12357,9 +12537,8 @@
   HValue* input = Pop();
   HValue* stub = Add<HConstant>(callable.code());
   HValue* values[] = {context(), input};
-  HInstruction* result =
-      New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
-                               Vector<HValue*>(values, arraysize(values)));
+  HInstruction* result = New<HCallWithDescriptor>(
+      stub, 0, callable.descriptor(), ArrayVector(values));
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -12532,9 +12711,9 @@
   Callable callable = CodeFactory::SubString(isolate());
   HValue* stub = Add<HConstant>(callable.code());
   HValue* values[] = {context()};
-  HInstruction* result = New<HCallWithDescriptor>(
-      stub, call->arguments()->length(), callable.descriptor(),
-      Vector<HValue*>(values, arraysize(values)));
+  HInstruction* result =
+      New<HCallWithDescriptor>(stub, call->arguments()->length(),
+                               callable.descriptor(), ArrayVector(values));
   result->set_type(HType::String());
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -12547,8 +12726,8 @@
   FastNewObjectDescriptor descriptor(isolate());
   HValue* values[] = {context(), Pop(), Pop()};
   HConstant* stub_value = Add<HConstant>(stub.GetCode());
-  HInstruction* result = New<HCallWithDescriptor>(
-      stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
+  HInstruction* result =
+      New<HCallWithDescriptor>(stub_value, 0, descriptor, ArrayVector(values));
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -12560,9 +12739,9 @@
   Callable callable = CodeFactory::RegExpExec(isolate());
   HValue* stub = Add<HConstant>(callable.code());
   HValue* values[] = {context()};
-  HInstruction* result = New<HCallWithDescriptor>(
-      stub, call->arguments()->length(), callable.descriptor(),
-      Vector<HValue*>(values, arraysize(values)));
+  HInstruction* result =
+      New<HCallWithDescriptor>(stub, call->arguments()->length(),
+                               callable.descriptor(), ArrayVector(values));
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -12644,54 +12823,15 @@
 void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
   DCHECK_LE(2, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
-
-  // Try and customize ES6 instanceof here.
-  // We should at least have the constructor on the expression stack.
-  if (FLAG_harmony_instanceof && FLAG_harmony_instanceof_opt &&
-      call->arguments()->length() == 3) {
-    HValue* target = environment()->ExpressionStackAt(2);
-    if (target->IsConstant()) {
-      HConstant* constant_function = HConstant::cast(target);
-      if (constant_function->handle(isolate())->IsJSFunction()) {
-        Handle<JSFunction> func =
-            Handle<JSFunction>::cast(constant_function->handle(isolate()));
-        if (*func == isolate()->native_context()->ordinary_has_instance()) {
-          // Look at the function, which will be argument 1.
-          HValue* right = environment()->ExpressionStackAt(1);
-          if (right->IsConstant() &&
-              HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
-            Handle<JSFunction> constructor = Handle<JSFunction>::cast(
-                HConstant::cast(right)->handle(isolate()));
-            if (constructor->IsConstructor() &&
-                !constructor->map()->has_non_instance_prototype()) {
-              JSFunction::EnsureHasInitialMap(constructor);
-              DCHECK(constructor->has_initial_map());
-              Handle<Map> initial_map(constructor->initial_map(), isolate());
-              top_info()->dependencies()->AssumeInitialMapCantChange(
-                  initial_map);
-              HInstruction* prototype =
-                  Add<HConstant>(handle(initial_map->prototype(), isolate()));
-              HValue* left = environment()->ExpressionStackAt(0);
-              HHasInPrototypeChainAndBranch* result =
-                  New<HHasInPrototypeChainAndBranch>(left, prototype);
-              Drop(3);
-              return ast_context()->ReturnControl(result, call->id());
-            }
-          }
-        }
-      }
-    }
-  }
-
   CallTrampolineDescriptor descriptor(isolate());
   PushArgumentsFromEnvironment(call->arguments()->length() - 1);
   HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
   HValue* target = Pop();
   HValue* values[] = {context(), target,
                       Add<HConstant>(call->arguments()->length() - 2)};
-  HInstruction* result = New<HCallWithDescriptor>(
-      trampoline, call->arguments()->length() - 1, descriptor,
-      Vector<HValue*>(values, arraysize(values)));
+  HInstruction* result =
+      New<HCallWithDescriptor>(trampoline, call->arguments()->length() - 1,
+                               descriptor, ArrayVector(values));
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -12793,7 +12933,7 @@
   // Allocate the table and add the proper map.
   HValue* table =
       Add<HAllocate>(Add<HConstant>(kSizeInBytes), HType::HeapObject(),
-                     NOT_TENURED, FIXED_ARRAY_TYPE);
+                     NOT_TENURED, FIXED_ARRAY_TYPE, graph()->GetConstant0());
   AddStoreMapConstant(table, isolate()->factory()->ordered_hash_table_map());
 
   // Initialize the FixedArray...
@@ -12923,13 +13063,6 @@
   return ast_context()->ReturnValue(value);
 }
 
-void HOptimizedGraphBuilder::GenerateGetOrdinaryHasInstance(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 0);
-  // ordinary_has_instance is immutable so we can treat it as a constant.
-  HValue* value = Add<HConstant>(isolate()->ordinary_has_instance());
-  return ast_context()->ReturnValue(value);
-}
-
 #undef CHECK_BAILOUT
 #undef CHECK_ALIVE
 
@@ -13251,13 +13384,24 @@
 
 void HTracer::TraceCompilation(CompilationInfo* info) {
   Tag tag(this, "compilation");
-  base::SmartArrayPointer<char> name = info->GetDebugName();
+  std::string name;
+  Object* source_name = info->script()->name();
+  if (source_name->IsString()) {
+    String* str = String::cast(source_name);
+    if (str->length() > 0) {
+      name.append(str->ToCString().get());
+      name.append(":");
+    }
+  }
+  base::SmartArrayPointer<char> method_name = info->GetDebugName();
+  name.append(method_name.get());
   if (info->IsOptimizing()) {
-    PrintStringProperty("name", name.get());
+    PrintStringProperty("name", name.c_str());
     PrintIndent();
-    trace_.Add("method \"%s:%d\"\n", name.get(), info->optimization_id());
+    trace_.Add("method \"%s:%d\"\n", method_name.get(),
+               info->optimization_id());
   } else {
-    PrintStringProperty("name", name.get());
+    PrintStringProperty("name", name.c_str());
     PrintStringProperty("method", "stub");
   }
   PrintLongProperty("date",
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
index 10c0baa..3811773 100644
--- a/src/crankshaft/hydrogen.h
+++ b/src/crankshaft/hydrogen.h
@@ -7,12 +7,14 @@
 
 #include "src/accessors.h"
 #include "src/allocation.h"
+#include "src/ast/ast-type-bounds.h"
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/bailout-reason.h"
 #include "src/compiler.h"
 #include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/hydrogen-instructions.h"
+#include "src/parsing/parser.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -30,6 +32,28 @@
 class LChunk;
 class LiveRange;
 
+class HCompilationJob final : public CompilationJob {
+ public:
+  explicit HCompilationJob(Handle<JSFunction> function)
+      : CompilationJob(&info_, "Crankshaft"),
+        zone_(function->GetIsolate()->allocator()),
+        parse_info_(&zone_, function),
+        info_(&parse_info_, function),
+        graph_(nullptr),
+        chunk_(nullptr) {}
+
+ protected:
+  virtual Status CreateGraphImpl();
+  virtual Status OptimizeGraphImpl();
+  virtual Status GenerateCodeImpl();
+
+ private:
+  Zone zone_;
+  ParseInfo parse_info_;
+  CompilationInfo info_;
+  HGraph* graph_;
+  LChunk* chunk_;
+};
 
 class HBasicBlock final : public ZoneObject {
  public:
@@ -293,6 +317,11 @@
   HStackCheck* stack_check_;
 };
 
+struct HInlinedFunctionInfo {
+  explicit HInlinedFunctionInfo(int start_position)
+      : start_position(start_position) {}
+  int start_position;
+};
 
 class HGraph final : public ZoneObject {
  public:
@@ -392,13 +421,11 @@
   }
   int maximum_environment_size() { return maximum_environment_size_; }
 
-  bool use_optimistic_licm() {
-    return use_optimistic_licm_;
-  }
+  bool allow_code_motion() const { return allow_code_motion_; }
+  void set_allow_code_motion(bool value) { allow_code_motion_ = value; }
 
-  void set_use_optimistic_licm(bool value) {
-    use_optimistic_licm_ = value;
-  }
+  bool use_optimistic_licm() const { return use_optimistic_licm_; }
+  void set_use_optimistic_licm(bool value) { use_optimistic_licm_ = value; }
 
   void MarkDependsOnEmptyArrayProtoElements() {
     // Add map dependency if not already added.
@@ -444,6 +471,10 @@
   // the corresponding script.
   int SourcePositionToScriptPosition(SourcePosition position);
 
+  ZoneVector<HInlinedFunctionInfo>& inlined_function_infos() {
+    return inlined_function_infos_;
+  }
+
  private:
   HConstant* ReinsertConstantIfNecessary(HConstant* constant);
   HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -480,6 +511,7 @@
   CallInterfaceDescriptor descriptor_;
   Zone* zone_;
 
+  bool allow_code_motion_;
   bool use_optimistic_licm_;
   bool depends_on_empty_array_proto_elements_;
   int type_change_checksum_;
@@ -487,6 +519,8 @@
   int no_side_effects_scope_count_;
   bool disallow_adding_new_values_;
 
+  ZoneVector<HInlinedFunctionInfo> inlined_function_infos_;
+
   DISALLOW_COPY_AND_ASSIGN(HGraph);
 };
 
@@ -1768,18 +1802,6 @@
     HAllocate* AllocateArray(HValue* capacity,
                              HValue* length_field,
                              FillMode fill_mode = FILL_WITH_HOLE);
-    // Use these allocators when capacity could be unknown at compile time
-    // but its limit is known. For constant |capacity| the value of
-    // |capacity_upper_bound| is ignored and the actual |capacity|
-    // value is used as an upper bound.
-    HAllocate* AllocateArray(HValue* capacity,
-                             int capacity_upper_bound,
-                             HValue* length_field,
-                             FillMode fill_mode = FILL_WITH_HOLE);
-    HAllocate* AllocateArray(HValue* capacity,
-                             HConstant* capacity_upper_bound,
-                             HValue* length_field,
-                             FillMode fill_mode = FILL_WITH_HOLE);
     HValue* GetElementsLocation() { return elements_location_; }
     HValue* EmitMapCode();
 
@@ -1929,6 +1951,9 @@
   SourcePosition source_position() { return position_; }
   void set_source_position(SourcePosition position) { position_ = position; }
 
+  int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+                           SourcePosition position);
+
   HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
   template <typename ViewClass>
   void BuildArrayBufferViewInitialization(HValue* obj,
@@ -2170,6 +2195,8 @@
 
   void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
 
+  AstTypeBounds* bounds() { return &bounds_; }
+
   void* operator new(size_t size, Zone* zone) { return zone->New(size); }
   void operator delete(void* pointer, Zone* zone) { }
   void operator delete(void* pointer) { }
@@ -2224,7 +2251,7 @@
     function_state()->ClearInlinedTestContext();
   }
   LanguageMode function_language_mode() {
-    return function_state()->compilation_info()->language_mode();
+    return function_state()->compilation_info()->parse_info()->language_mode();
   }
 
 #define FOR_EACH_HYDROGEN_INTRINSIC(F) \
@@ -2259,7 +2286,6 @@
   F(RegExpSource)                      \
   F(NumberToString)                    \
   F(DebugIsActive)                     \
-  F(GetOrdinaryHasInstance)            \
   /* Typed Arrays */                   \
   F(TypedArrayInitialize)              \
   F(MaxSmi)                            \
@@ -2903,6 +2929,8 @@
 
   HOsrBuilder* osr_;
 
+  AstTypeBounds bounds_;
+
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
   friend class KeyedLoadFastElementStub;
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
index ae1ca1f..fa0a897 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -165,11 +165,11 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
-    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(edi);
@@ -274,8 +274,6 @@
       if (info()->saves_caller_doubles()) RestoreCallerDoubles();
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
-    info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                 table_entry->deopt_info.inlining_id);
   }
   if (needs_frame.is_linked()) {
     __ bind(&needs_frame);
@@ -725,13 +723,12 @@
     __ bind(&done);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
     DeoptComment(deopt_info);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
@@ -2291,16 +2288,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(eax));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -2728,9 +2715,9 @@
     __ j(not_equal, &done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
-      // it needs to bail out.
-      __ mov(result, isolate()->factory()->array_protector());
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+      // Otherwise it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
              Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
       DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
@@ -3091,8 +3078,19 @@
   DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
 
   Label slow, allocated, done;
-  Register tmp = input_reg.is(eax) ? ecx : eax;
-  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+  uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
+  available_regs &= ~input_reg.bit();
+  if (instr->context()->IsRegister()) {
+    // Make sure that the context isn't overwritten in the AllocateHeapNumber
+    // macro below.
+    available_regs &= ~ToRegister(instr->context()).bit();
+  }
+
+  Register tmp =
+      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+  available_regs &= ~tmp.bit();
+  Register tmp2 =
+      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
 
   // Preserve the value of all registers.
   PushSafepointRegistersScope scope(this);
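
Rather than hand-picking tmp and tmp2 around input_reg, the code now starts from a mask of candidate registers, clears the bits that must stay live (the input, and the context when it sits in a register), and takes the lowest set bit twice. A portable sketch of the same selection:

#include <cstdint>

// Index of the lowest set bit; the caller must pass a non-zero mask.
int CountTrailingZeros(uint32_t x) {
  int n = 0;
  while ((x & 1u) == 0) {
    x >>= 1;
    ++n;
  }
  return n;
}

// Picks two distinct codes from a mask with at least two bits set.
void PickScratch(uint32_t available, int* tmp, int* tmp2) {
  *tmp = CountTrailingZeros(available);
  available &= available - 1;  // clear the lowest set bit
  *tmp2 = CountTrailingZeros(available);
}
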
@@ -3975,7 +3973,15 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      __ mov(ebx, ToImmediate(key, Representation::Smi()));
+      LConstantOperand* constant_key = LConstantOperand::cast(key);
+      int32_t int_key = ToInteger32(constant_key);
+      if (Smi::IsValid(int_key)) {
+        __ mov(ebx, Immediate(Smi::FromInt(int_key)));
+      } else {
+        // We should never get here at runtime because there is a smi check on
+        // the key before this point.
+        __ int3();
+      }
     } else {
       __ Move(ebx, ToRegister(key));
       __ SmiTag(ebx);
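
Materializing a constant key as a smi is now guarded by Smi::IsValid: on 32-bit targets a smi carries only a 31-bit signed payload (one bit is the tag), so a constant outside that range cannot be represented, and since a smi check on the key precedes this code the out-of-range case is unreachable and traps with int3. A sketch of the check and the tagging, assuming the 32-bit layout:

#include <cstdint>

// 32-bit layout assumed: 1 tag bit plus a 31-bit signed payload. 64-bit V8
// widens the payload, so the bounds there are different.
bool SmiIsValid31(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}

int32_t SmiTag31(int32_t value) {
  // Shift in the 0 tag bit; the unsigned cast sidesteps signed-shift UB.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
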
@@ -4854,7 +4860,7 @@
   Register temp = ToRegister(instr->temp());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -4862,6 +4868,10 @@
     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
 
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
@@ -4892,6 +4902,29 @@
   }
 }
 
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, temp, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, temp, flags);
+  }
+}
 
 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   Register result = ToRegister(instr->result());
@@ -4931,6 +4964,22 @@
   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+    }
+    // If the allocation folding dominator allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    __ sub(eax, Immediate(kHeapObjectTag));
+    __ mov(Operand::StaticVariable(allocation_top), eax);
+    __ add(eax, Immediate(kHeapObjectTag));
+  }
 }
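
The new tail of DoDeferredAllocate keeps the allocation-top pointer
consistent for folded allocations: when the dominator's inline allocation
fails and the runtime allocates (and possibly GCs), top is rewound to the
start of the returned object so later FastAllocate bumps see the same state
as on the inline path. A toy model of just that bookkeeping; the Space type
is a stand-in, not the V8 heap API:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Toy bump-pointer space standing in for the allocation-top variable.
    struct Space {
      uintptr_t top;
      uintptr_t Allocate(size_t size) {  // runtime-style: advances top
        uintptr_t result = top;
        top += size;
        return result;
      }
    };

    int main() {
      Space space{0x1000};
      // The runtime allocated the dominator's reservation and bumped top.
      uintptr_t object = space.Allocate(64);
      assert(space.top == object + 64);

      // "Virtually undo" the allocation: rewind top to the object start so
      // the folded allocations can bump through the reservation inline.
      space.top = object;
      assert(space.top == object);
      return 0;
    }
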
 
 
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.h b/src/crankshaft/ia32/lithium-codegen-ia32.h
index bc61c96..5084819 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -115,8 +115,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   Scope* scope() const { return scope_; }
 
   XMMRegister double_scratch0() const { return xmm0; }
diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc
index 4afeef5..0bfdb0d 100644
--- a/src/crankshaft/ia32/lithium-ia32.cc
+++ b/src/crankshaft/ia32/lithium-ia32.cc
@@ -910,7 +910,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -966,17 +966,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), esi);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -2406,14 +2395,19 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
-  LOperand* size = instr->size()->IsConstant()
-      ? UseConstant(instr->size())
-      : UseTempRegister(instr->size());
-  LOperand* temp = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(context, size, temp);
-  return AssignPointerMap(DefineAsRegister(result));
+  LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
+                                               : UseRegister(instr->size());
+  if (instr->IsAllocationFolded()) {
+    LOperand* temp = TempRegister();
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LOperand* temp = TempRegister();
+    LAllocate* result = new (zone()) LAllocate(context, size, temp);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
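
DoAllocate now splits into two lithium instructions: a folded allocation can
never call out (its memory is covered by the dominator's reservation), so it
needs neither a context nor a pointer map. A hedged sketch of the dispatch
shape; the types below are stand-ins, not the Crankshaft classes:

    #include <memory>

    struct HAllocate {
      bool folded;
      bool IsAllocationFolded() const { return folded; }
    };
    struct LInstr { virtual ~LInstr() = default; };
    struct LAllocate : LInstr {};      // may call the runtime: pointer map
    struct LFastAllocate : LInstr {};  // pure bump allocation: no GC, no map

    std::unique_ptr<LInstr> DoAllocate(const HAllocate& instr) {
      if (instr.IsAllocationFolded()) {
        // Memory was already reserved by the folding dominator.
        return std::make_unique<LFastAllocate>();
      }
      // Dominator or unfolded allocation: keep the deferred slow path.
      return std::make_unique<LAllocate>();
    }

    int main() { return DoAllocate(HAllocate{true}) != nullptr ? 0 : 1; }
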
 
 
diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h
index 68541a4..d1d5a06 100644
--- a/src/crankshaft/ia32/lithium-ia32.h
+++ b/src/crankshaft/ia32/lithium-ia32.h
@@ -71,6 +71,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -82,7 +83,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -1139,22 +1139,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
@@ -2401,6 +2385,19 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp) {
+    inputs_[0] = size;
+    temps_[0] = temp;
+  }
+
+  LOperand* size() const { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
 
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
diff --git a/src/crankshaft/lithium-allocator.h b/src/crankshaft/lithium-allocator.h
index b648bd8..ce0e565 100644
--- a/src/crankshaft/lithium-allocator.h
+++ b/src/crankshaft/lithium-allocator.h
@@ -6,6 +6,7 @@
 #define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
 
 #include "src/allocation.h"
+#include "src/base/compiler-specific.h"
 #include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/lithium.h"
 #include "src/zone.h"
@@ -327,7 +328,7 @@
  public:
   LAllocator(int first_virtual_register, HGraph* graph);
 
-  static void TraceAlloc(const char* msg, ...);
+  static PRINTF_FORMAT(1, 2) void TraceAlloc(const char* msg, ...);
 
   // Checks whether the value of a given virtual register is tagged.
   bool HasTaggedValue(int virtual_register) const;
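
PRINTF_FORMAT(1, 2) lets the compiler check the variadic arguments against
the format string, which the old unannotated declaration (and the removed
FPRINTF_CHECKING spelling below) did not do. A minimal sketch of such an
annotation; the macro body is illustrative, assuming a GCC/Clang attribute:

    #include <cstdarg>
    #include <cstdio>

    // Positions are 1-based; member functions count the implicit 'this',
    // which is why LCodeGenBase::Comment uses PRINTF_FORMAT(2, 3).
    #if defined(__GNUC__)
    #define PRINTF_FORMAT(fmt, args) \
      __attribute__((format(printf, fmt, args)))
    #else
    #define PRINTF_FORMAT(fmt, args)
    #endif

    static PRINTF_FORMAT(1, 2) void TraceAlloc(const char* msg, ...) {
      va_list ap;
      va_start(ap, msg);
      vfprintf(stderr, msg, ap);
      va_end(ap);
    }

    int main() {
      TraceAlloc("register %d spilled\n", 7);
      // TraceAlloc("register %d\n", "oops");  // now a compile-time warning
      return 0;
    }
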
diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc
index 53fedcf..4bf2100 100644
--- a/src/crankshaft/lithium-codegen.cc
+++ b/src/crankshaft/lithium-codegen.cc
@@ -158,8 +158,9 @@
 
 void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
   SourcePosition position = deopt_info.position;
+  int deopt_id = deopt_info.deopt_id;
   int raw_position = position.IsUnknown() ? 0 : position.raw();
-  masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position);
+  masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position, deopt_id);
 }
 
 
@@ -364,13 +365,10 @@
   }
 }
 
-
 Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
-    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason) {
+    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason, int deopt_id) {
   Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
-                                    instr->Mnemonic(), deopt_reason);
-  HEnterInlined* enter_inlined = instr->environment()->entry();
-  deopt_info.inlining_id = enter_inlined ? enter_inlined->inlining_id() : 0;
+                                    deopt_reason, deopt_id);
   return deopt_info;
 }
 }  // namespace internal
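
MakeDeoptInfo now carries the bailout id through DeoptInfo instead of the
instruction mnemonic and inlining id, so RecordDeoptReason can tie a deopt
event back to its site directly. A rough stand-in for the new shape; the
types are illustrative, not the V8 declarations:

    #include <cstdio>

    struct DeoptInfo {
      int raw_position;    // 0 when the source position is unknown
      const char* reason;  // e.g. "NotAHeapNumber"
      int deopt_id;        // newly threaded through to RecordDeoptReason
    };

    DeoptInfo MakeDeoptInfo(int position, const char* reason, int deopt_id) {
      return DeoptInfo{position, reason, deopt_id};
    }

    void RecordDeoptReason(const DeoptInfo& info) {
      std::printf("deopt #%d at %d: %s\n", info.deopt_id, info.raw_position,
                  info.reason);
    }

    int main() {
      RecordDeoptReason(MakeDeoptInfo(42, "NotAHeapNumber", 7));
      return 0;
    }
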
diff --git a/src/crankshaft/lithium-codegen.h b/src/crankshaft/lithium-codegen.h
index b1f7dac..70b8897 100644
--- a/src/crankshaft/lithium-codegen.h
+++ b/src/crankshaft/lithium-codegen.h
@@ -12,6 +12,8 @@
 namespace v8 {
 namespace internal {
 
+class HGraph;
+class LChunk;
 class LEnvironment;
 class LInstruction;
 class LPlatformChunk;
@@ -33,10 +35,10 @@
   LPlatformChunk* chunk() const { return chunk_; }
   HGraph* graph() const;
 
-  void FPRINTF_CHECKING Comment(const char* format, ...);
+  void PRINTF_FORMAT(2, 3) Comment(const char* format, ...);
   void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
   static Deoptimizer::DeoptInfo MakeDeoptInfo(
-      LInstruction* instr, Deoptimizer::DeoptReason deopt_reason);
+      LInstruction* instr, Deoptimizer::DeoptReason deopt_reason, int deopt_id);
 
   bool GenerateBody();
   virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
index f1717ca..bdc5c64 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -330,8 +330,6 @@
       } else {
         __ Call(&call_deopt_entry);
       }
-      info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                   table_entry->deopt_info.inlining_id);
     }
 
     if (needs_frame.is_linked()) {
@@ -789,7 +787,7 @@
     __ bind(&skip);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
@@ -798,7 +796,6 @@
       !info()->saves_caller_doubles()) {
     DeoptComment(deopt_info);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
@@ -1728,13 +1725,13 @@
   LOperand* left = instr->left();
   LOperand* right = instr->right();
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
-  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+  Register scratch = scratch1();
   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
     Register left_reg = ToRegister(left);
     Register right_reg = EmitLoadRegister(right, scratch0());
     Register result_reg = ToRegister(instr->result());
     Label return_right, done;
-    Register scratch = scratch1();
     __ Slt(scratch, left_reg, Operand(right_reg));
     if (condition == ge) {
      __  Movz(result_reg, left_reg, scratch);
@@ -1749,43 +1746,19 @@
     FPURegister left_reg = ToDoubleRegister(left);
     FPURegister right_reg = ToDoubleRegister(right);
     FPURegister result_reg = ToDoubleRegister(instr->result());
-    Label check_nan_left, check_zero, return_left, return_right, done;
-    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
-    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
-    __ Branch(&return_right);
-
-    __ bind(&check_zero);
-    // left == right != 0.
-    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
-    // At this point, both left and right are either 0 or -0.
-    if (operation == HMathMinMax::kMathMin) {
-      // The algorithm is: -((-L) + (-R)), which in case of L and R being
-      // different registers is most efficiently expressed as -((-L) - R).
-      __ neg_d(left_reg, left_reg);
-      if (left_reg.is(right_reg)) {
-        __ add_d(result_reg, left_reg, right_reg);
-      } else {
-        __ sub_d(result_reg, left_reg, right_reg);
-      }
-      __ neg_d(result_reg, result_reg);
+    Label nan, done;
+    if (operation == HMathMinMax::kMathMax) {
+      __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
     } else {
-      __ add_d(result_reg, left_reg, right_reg);
+      DCHECK(operation == HMathMinMax::kMathMin);
+      __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
     }
     __ Branch(&done);
 
-    __ bind(&check_nan_left);
-    // left == NaN.
-    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
-    __ bind(&return_right);
-    if (!right_reg.is(result_reg)) {
-      __ mov_d(result_reg, right_reg);
-    }
-    __ Branch(&done);
+    __ bind(&nan);
+    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+    __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
 
-    __ bind(&return_left);
-    if (!left_reg.is(result_reg)) {
-      __ mov_d(result_reg, left_reg);
-    }
     __ bind(&done);
   }
 }
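
The rewritten double min/max above pushes the tricky cases into one macro
instruction that branches to a shared nan label, where the canonical NaN is
loaded from the root list. The deleted code documents what those semantics
are: NaN is contagious, and for equal operands the sign of zero matters. A
portable sketch of the JS Math.min contract for doubles, under the
assumption that MinNaNCheck_d implements exactly this:

    #include <cassert>
    #include <cmath>

    double JsMathMin(double l, double r) {
      if (std::isnan(l) || std::isnan(r)) return std::nan("");  // contagious
      if (l == r) {
        // Only +-0 needs care: -((-l) + (-r)) is -0 iff either side is -0.
        if (l == 0.0) return -(-l + -r);
        return l;  // equal and nonzero: either operand will do
      }
      return l < r ? l : r;
    }

    int main() {
      assert(std::signbit(JsMathMin(-0.0, 0.0)));        // -0 wins over +0
      assert(std::isnan(JsMathMin(std::nan(""), 1.0)));  // NaN propagates
      assert(JsMathMin(2.0, 3.0) == 2.0);
      return 0;
    }
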
@@ -2409,16 +2382,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(v0));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -4159,7 +4122,15 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+      LConstantOperand* constant_key = LConstantOperand::cast(key);
+      int32_t int_key = ToInteger32(constant_key);
+      if (Smi::IsValid(int_key)) {
+        __ li(a3, Operand(Smi::FromInt(int_key)));
+      } else {
+        // We should never get here at runtime because there is a smi check on
+        // the key before this point.
+        __ stop("expected smi");
+      }
     } else {
       __ mov(a3, ToRegister(key));
       __ SmiTag(a3);
@@ -4456,7 +4427,7 @@
 
   if (FLAG_inline_new) {
     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
     __ Branch(&done);
   }
 
@@ -4480,16 +4451,13 @@
     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-    __ Subu(v0, v0, kHeapObjectTag);
     __ StoreToSafepointRegisterSlot(v0, dst);
   }
 
-
   // Done. Put the value in dbl_scratch into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
-  __ Addu(dst, dst, kHeapObjectTag);
+  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
 }
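
With AllocateHeapNumber returning a tagged pointer (the DONT_TAG_RESULT mode
is gone), the stores switch from MemOperand plus explicit untag/retag pairs
to FieldMemOperand, which folds the tag subtraction into the displacement.
A small sketch of that relationship; the operand type is a stand-in, but the
kHeapObjectTag arithmetic matches V8's tagging scheme:

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;  // heap pointers: low bit set

    struct MemOperand {  // base register value plus displacement
      intptr_t base;
      intptr_t offset;
      intptr_t EffectiveAddress() const { return base + offset; }
    };

    // FieldMemOperand(obj, off) == MemOperand(obj, off - kHeapObjectTag),
    // so a tagged pointer can be dereferenced without untagging it first.
    MemOperand FieldMemOperand(intptr_t tagged_object, intptr_t offset) {
      return MemOperand{tagged_object, offset - kHeapObjectTag};
    }

    int main() {
      intptr_t untagged = 0x1000;  // the object's real address
      intptr_t tagged = untagged + kHeapObjectTag;
      constexpr intptr_t kValueOffset = 8;  // illustrative field offset
      assert(FieldMemOperand(tagged, kValueOffset).EffectiveAddress() ==
             untagged + kValueOffset);
      return 0;
    }
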
 
 
@@ -4514,16 +4482,13 @@
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
-    // We want the untagged address first for performance
-    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
-                          DONT_TAG_RESULT);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
   } else {
     __ Branch(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
-  // Now that we have finished with the object's real address tag it
-  __ Addu(reg, reg, kHeapObjectTag);
 }
 
 
@@ -4544,7 +4509,6 @@
   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-  __ Subu(v0, v0, kHeapObjectTag);
   __ StoreToSafepointRegisterSlot(v0, reg);
 }
 
@@ -5092,7 +5056,7 @@
   Register scratch2 = ToRegister(instr->temp2());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -5100,6 +5064,12 @@
     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5168,6 +5138,49 @@
   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(v0, result);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+    }
+    // If the allocation folding dominator allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    Register top_address = scratch0();
+    __ Subu(v0, v0, Operand(kHeapObjectTag));
+    __ li(top_address, Operand(allocation_top));
+    __ sw(v0, MemOperand(top_address));
+    __ Addu(v0, v0, Operand(kHeapObjectTag));
+  }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  }
 }
 
 
diff --git a/src/crankshaft/mips/lithium-codegen-mips.h b/src/crankshaft/mips/lithium-codegen-mips.h
index 7a316e5..d2b0d2d 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/src/crankshaft/mips/lithium-codegen-mips.h
@@ -134,8 +134,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   Scope* scope() const { return scope_; }
 
   Register scratch0() { return kLithiumScratchReg; }
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
index 71c34df..345694d 100644
--- a/src/crankshaft/mips/lithium-mips.cc
+++ b/src/crankshaft/mips/lithium-mips.cc
@@ -887,7 +887,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -939,17 +939,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), cp);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -2298,13 +2287,18 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
   LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
-  return AssignPointerMap(DefineAsRegister(result));
+  if (instr->IsAllocationFolded()) {
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
 
 
diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h
index 7d41093..ea5e792 100644
--- a/src/crankshaft/mips/lithium-mips.h
+++ b/src/crankshaft/mips/lithium-mips.h
@@ -67,6 +67,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -78,7 +79,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -1113,22 +1113,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -2350,6 +2334,21 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
 
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index c7bbe9f..d114e4f 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -315,8 +315,6 @@
           __ BranchAndLink(&call_deopt_entry);
         }
       }
-      info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                   table_entry->deopt_info.inlining_id);
     }
     if (needs_frame.is_linked()) {
       __ bind(&needs_frame);
@@ -776,7 +774,7 @@
     __ bind(&skip);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
@@ -785,7 +783,6 @@
       !info()->saves_caller_doubles()) {
     DeoptComment(deopt_info);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry* table_entry =
         new (zone()) Deoptimizer::JumpTableEntry(
@@ -1846,13 +1843,13 @@
   LOperand* left = instr->left();
   LOperand* right = instr->right();
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
-  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+  Register scratch = scratch1();
   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
     Register left_reg = ToRegister(left);
     Register right_reg = EmitLoadRegister(right, scratch0());
     Register result_reg = ToRegister(instr->result());
     Label return_right, done;
-    Register scratch = scratch1();
     __ Slt(scratch, left_reg, Operand(right_reg));
     if (condition == ge) {
      __  Movz(result_reg, left_reg, scratch);
@@ -1867,43 +1864,19 @@
     FPURegister left_reg = ToDoubleRegister(left);
     FPURegister right_reg = ToDoubleRegister(right);
     FPURegister result_reg = ToDoubleRegister(instr->result());
-    Label check_nan_left, check_zero, return_left, return_right, done;
-    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
-    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
-    __ Branch(&return_right);
-
-    __ bind(&check_zero);
-    // left == right != 0.
-    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
-    // At this point, both left and right are either 0 or -0.
-    if (operation == HMathMinMax::kMathMin) {
-      // The algorithm is: -((-L) + (-R)), which in case of L and R being
-      // different registers is most efficiently expressed as -((-L) - R).
-      __ neg_d(left_reg, left_reg);
-      if (left_reg.is(right_reg)) {
-        __ add_d(result_reg, left_reg, right_reg);
-      } else {
-        __ sub_d(result_reg, left_reg, right_reg);
-      }
-      __ neg_d(result_reg, result_reg);
+    Label nan, done;
+    if (operation == HMathMinMax::kMathMax) {
+      __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
     } else {
-      __ add_d(result_reg, left_reg, right_reg);
+      DCHECK(operation == HMathMinMax::kMathMin);
+      __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
     }
     __ Branch(&done);
 
-    __ bind(&check_nan_left);
-    // left == NaN.
-    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
-    __ bind(&return_right);
-    if (!right_reg.is(result_reg)) {
-      __ mov_d(result_reg, right_reg);
-    }
-    __ Branch(&done);
+    __ bind(&nan);
+    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+    __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
 
-    __ bind(&return_left);
-    if (!left_reg.is(result_reg)) {
-      __ mov_d(result_reg, left_reg);
-    }
     __ bind(&done);
   }
 }
@@ -2529,18 +2502,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  Label true_label, done;
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(v0));
-
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -4391,7 +4352,15 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+      LConstantOperand* constant_key = LConstantOperand::cast(key);
+      int32_t int_key = ToInteger32(constant_key);
+      if (Smi::IsValid(int_key)) {
+        __ li(a3, Operand(Smi::FromInt(int_key)));
+      } else {
+        // We should never get here at runtime because there is a smi check on
+        // the key before this point.
+        __ stop("expected smi");
+      }
     } else {
       __ mov(a3, ToRegister(key));
       __ SmiTag(a3);
@@ -4662,7 +4631,7 @@
 
   if (FLAG_inline_new) {
     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
     __ Branch(&done);
   }
 
@@ -4717,15 +4686,12 @@
   if (FLAG_inline_new) {
     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
     // We want the untagged address first for performance
-    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
-                          DONT_TAG_RESULT);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
   } else {
     __ Branch(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
-  // Now that we have finished with the object's real address tag it
-  __ Daddu(reg, reg, kHeapObjectTag);
+  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
 }
 
 
@@ -4746,7 +4712,6 @@
   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-  __ Dsubu(v0, v0, kHeapObjectTag);
   __ StoreToSafepointRegisterSlot(v0, reg);
 }
 
@@ -5293,7 +5258,7 @@
   Register scratch2 = ToRegister(instr->temp2());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -5301,6 +5266,12 @@
     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5371,6 +5342,49 @@
   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(v0, result);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+    }
+    // If the allocation folding dominator allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    Register top_address = scratch0();
+    __ Dsubu(v0, v0, Operand(kHeapObjectTag));
+    __ li(top_address, Operand(allocation_top));
+    __ sd(v0, MemOperand(top_address));
+    __ Daddu(v0, v0, Operand(kHeapObjectTag));
+  }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  }
 }
 
 
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.h b/src/crankshaft/mips64/lithium-codegen-mips64.h
index 4a700bd..9ac3192 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -136,8 +136,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   Scope* scope() const { return scope_; }
 
   Register scratch0() { return kLithiumScratchReg; }
diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc
index bcfbc24..3ee9ab6 100644
--- a/src/crankshaft/mips64/lithium-mips64.cc
+++ b/src/crankshaft/mips64/lithium-mips64.cc
@@ -887,7 +887,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -939,17 +939,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), cp);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -2303,13 +2292,18 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
   LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
-  return AssignPointerMap(DefineAsRegister(result));
+  if (instr->IsAllocationFolded()) {
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
 
 
diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h
index 41cf93c..5d282ec 100644
--- a/src/crankshaft/mips64/lithium-mips64.h
+++ b/src/crankshaft/mips64/lithium-mips64.h
@@ -69,6 +69,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -80,7 +81,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -1131,22 +1131,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -2396,6 +2380,21 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
 
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
index d5d0104..31d9ebe 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -326,8 +326,6 @@
       } else {
         __ b(&call_deopt_entry, SetLK);
       }
-      info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                   table_entry->deopt_info.inlining_id);
     }
 
     if (needs_frame.is_linked()) {
@@ -755,7 +753,7 @@
     __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
@@ -763,7 +761,6 @@
   if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
     DeoptComment(deopt_info);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
@@ -2555,16 +2552,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(r3));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -3614,8 +3601,13 @@
   }
 }
 
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  DoubleRegister output_reg = ToDoubleRegister(instr->result());
+  __ frim(output_reg, input_reg);
+}
 
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   DoubleRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register input_high = scratch0();
@@ -3637,8 +3629,30 @@
   __ bind(&done);
 }
 
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  DoubleRegister output_reg = ToDoubleRegister(instr->result());
+  DoubleRegister dot_five = double_scratch0();
+  Label done;
 
-void LCodeGen::DoMathRound(LMathRound* instr) {
+  __ frin(output_reg, input_reg);
+  __ fcmpu(input_reg, kDoubleRegZero);
+  __ bge(&done);
+  __ fcmpu(output_reg, input_reg);
+  __ beq(&done);
+
+  // Negative, non-integer case
+  __ LoadDoubleLiteral(dot_five, 0.5, r0);
+  __ fadd(output_reg, input_reg, dot_five);
+  __ frim(output_reg, output_reg);
+  // The range [-0.5, -0.0) yielded +0.0. Force the sign to negative.
+  __ fabs(output_reg, output_reg);
+  __ fneg(output_reg, output_reg);
+
+  __ bind(&done);
+}
+
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
   DoubleRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
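
DoMathRoundD above keeps the whole operation in floating point: frin (round
to nearest, ties away from zero) is right for all non-negative inputs, and
negative non-integers fall back to floor(x + 0.5) with a sign fixup so that
inputs in [-0.5, -0.0) still produce -0. A portable sketch of the same
algorithm, assuming std::round has frin's ties-away behavior and std::floor
matches frim:

    #include <cassert>
    #include <cmath>

    // JS Math.round: round half toward +infinity, preserving -0.
    double JsMathRound(double x) {
      double r = std::round(x);          // frin: round, ties away from zero
      if (x >= 0.0 || r == x) return r;  // non-negative or integral: done
      // Negative non-integer: JS rounds halves up, so take floor(x + 0.5).
      r = std::floor(x + 0.5);           // frim: round toward -infinity
      // Inputs in [-0.5, -0.0) yielded +0 here; force the sign negative.
      return -std::fabs(r);
    }

    int main() {
      assert(JsMathRound(2.5) == 3.0);
      assert(JsMathRound(-2.5) == -2.0);         // halves round up, not away
      assert(std::signbit(JsMathRound(-0.25)));  // -0, sign preserved
      assert(JsMathRound(-3.7) == -4.0);
      return 0;
    }
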
@@ -4430,7 +4444,15 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
+      LConstantOperand* constant_key = LConstantOperand::cast(key);
+      int32_t int_key = ToInteger32(constant_key);
+      if (Smi::IsValid(int_key)) {
+        __ LoadSmiLiteral(r6, Smi::FromInt(int_key));
+      } else {
+        // We should never get here at runtime because there is a smi check on
+        // the key before this point.
+        __ stop("expected smi");
+      }
     } else {
       __ SmiTag(r6, ToRegister(key));
     }
@@ -5311,7 +5333,7 @@
   class DeferredAllocate final : public LDeferredCode {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
-        : LDeferredCode(codegen), instr_(instr) {}
+        : LDeferredCode(codegen), instr_(instr) { }
     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
     LInstruction* instr() override { return instr_; }
 
@@ -5319,14 +5341,15 @@
     LAllocate* instr_;
   };
 
-  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
 
   Register result = ToRegister(instr->result());
   Register scratch = ToRegister(instr->temp1());
   Register scratch2 = ToRegister(instr->temp2());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -5335,6 +5358,12 @@
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
 
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5406,6 +5435,49 @@
   CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                           instr->context());
   __ StoreToSafepointRegisterSlot(r3, result);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+    }
+    // If the allocation folding dominator allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    Register top_address = scratch0();
+    __ subi(r3, r3, Operand(kHeapObjectTag));
+    __ mov(top_address, Operand(allocation_top));
+    __ StoreP(r3, MemOperand(top_address));
+    __ addi(r3, r3, Operand(kHeapObjectTag));
+  }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  }
 }
 
 
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.h b/src/crankshaft/ppc/lithium-codegen-ppc.h
index 28f1680..730c4bd 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -128,8 +128,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   Scope* scope() const { return scope_; }
 
   Register scratch0() { return kLithiumScratch; }
diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc
index b739786..0d9a617 100644
--- a/src/crankshaft/ppc/lithium-ppc.cc
+++ b/src/crankshaft/ppc/lithium-ppc.cc
@@ -892,7 +892,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -949,17 +949,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), cp);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -1107,20 +1096,32 @@
 
 
 LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegister(instr->value());
-  LMathFloor* result = new (zone()) LMathFloor(input);
-  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  if (instr->representation().IsInteger32()) {
+    LMathFloorI* result = new (zone()) LMathFloorI(input);
+    return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    LMathFloorD* result = new (zone()) LMathFloorD(input);
+    return DefineAsRegister(result);
+  }
 }
 
-
 LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegister(instr->value());
-  LOperand* temp = TempDoubleRegister();
-  LMathRound* result = new (zone()) LMathRound(input, temp);
-  return AssignEnvironment(DefineAsRegister(result));
+  if (instr->representation().IsInteger32()) {
+    LOperand* temp = TempDoubleRegister();
+    LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+    return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    LMathRoundD* result = new (zone()) LMathRoundD(input);
+    return DefineAsRegister(result);
+  }
 }
 
-
 LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
   LOperand* input = UseRegister(instr->value());
   LMathFround* result = new (zone()) LMathFround(input);
@@ -2307,13 +2308,18 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
   LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
-  return AssignPointerMap(DefineAsRegister(result));
+  if (instr->IsAllocationFolded()) {
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
 
 
diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h
index c39f620..f089b02 100644
--- a/src/crankshaft/ppc/lithium-ppc.h
+++ b/src/crankshaft/ppc/lithium-ppc.h
@@ -67,6 +67,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -78,7 +79,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -99,12 +99,14 @@
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
-  V(MathFloor)                               \
+  V(MathFloorD)                              \
+  V(MathFloorI)                              \
   V(MathFround)                              \
   V(MathLog)                                 \
   V(MathMinMax)                              \
   V(MathPowHalf)                             \
-  V(MathRound)                               \
+  V(MathRoundD)                              \
+  V(MathRoundI)                              \
   V(MathSqrt)                                \
   V(MaybeGrowElements)                       \
   V(ModByConstI)                             \
@@ -151,7 +153,6 @@
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
   Opcode opcode() const final { return LInstruction::k##type; } \
   void CompileToNative(LCodeGen* generator) final;              \
@@ -807,21 +808,43 @@
   void PrintDataTo(StringStream* stream) override;
 };
 
-
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+  explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
 
-  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
-
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathRound(LOperand* value, LOperand* temp) {
+  explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRoundI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
@@ -829,7 +852,7 @@
   LOperand* value() { return inputs_[0]; }
   LOperand* temp() { return temps_[0]; }
 
-  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
@@ -1092,22 +1115,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -2294,6 +2301,22 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
 
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
diff --git a/src/crankshaft/s390/lithium-codegen-s390.cc b/src/crankshaft/s390/lithium-codegen-s390.cc
index 689f4bc..38d1808 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -312,8 +312,6 @@
       } else {
         __ b(r14, &call_deopt_entry);
       }
-      info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                   table_entry->deopt_info.inlining_id);
     }
 
     if (needs_frame.is_linked()) {
@@ -746,14 +744,13 @@
     __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
@@ -2541,15 +2538,6 @@
   EmitBranch(instr, eq);
 }
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(r2));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -4387,7 +4375,15 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      __ LoadSmiLiteral(r5, ToSmi(LConstantOperand::cast(key)));
+      LConstantOperand* constant_key = LConstantOperand::cast(key);
+      int32_t int_key = ToInteger32(constant_key);
+      if (Smi::IsValid(int_key)) {
+        __ LoadSmiLiteral(r5, Smi::FromInt(int_key));
+      } else {
+        // We should never get here at runtime because there is a smi check on
+        // the key before this point.
+        __ stop("expected smi");
+      }
     } else {
       __ SmiTag(r5, ToRegister(key));
     }
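
The new guard above only emits the constant key as a Smi literal when it fits; anything out of range traps, since an earlier smi check should have filtered it. A standalone sketch of the range test, assuming the 31-bit payload of a 32-bit tagged word (the actual width depends on the target):

  #include <cstdint>
  #include <cstdio>

  // Hypothetical stand-in for Smi::IsValid on a 32-bit target: one bit of the
  // word is the tag, so only 31 bits of signed payload remain.
  constexpr int32_t kSmiMinValue = -(1 << 30);
  constexpr int32_t kSmiMaxValue = (1 << 30) - 1;

  bool SmiIsValid(int32_t value) {
    return value >= kSmiMinValue && value <= kSmiMaxValue;
  }

  int main() {
    printf("%d -> %s\n", 42, SmiIsValid(42) ? "smi" : "trap");
    printf("%d -> %s\n", INT32_MAX, SmiIsValid(INT32_MAX) ? "smi" : "trap");
  }
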
@@ -5244,7 +5240,7 @@
   Register scratch2 = ToRegister(instr->temp2());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -5253,6 +5249,12 @@
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
 
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5331,6 +5333,49 @@
   CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                           instr->context());
   __ StoreToSafepointRegisterSlot(r2, result);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags =
+          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+    }
+    // If the allocation folding dominator's allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    Register top_address = scratch0();
+    __ SubP(r2, r2, Operand(kHeapObjectTag));
+    __ mov(top_address, Operand(allocation_top));
+    __ StoreP(r2, MemOperand(top_address));
+    __ AddP(r2, r2, Operand(kHeapObjectTag));
+  }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, scratch1, scratch2, flags);
+  }
 }
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
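
For orientation, here is what allocation folding buys, as a minimal sketch under illustrative names (none of this is the V8 API): the dominator performs the single limit check for the whole group, and each folded LFastAllocate just bumps inside the reservation. That is also why the deferred path above must rewind the top pointer when the dominator's allocation went through the runtime instead.

  #include <cassert>
  #include <cstdint>

  // Illustrative bump allocator; `top` plays the role of the allocation top
  // that the deferred path rewinds after a GC.
  struct Space {
    uintptr_t top;
    uintptr_t limit;
  };

  // Dominator: one limit check covers the entire folded allocation group.
  uintptr_t AllocateGroup(Space* space, uintptr_t group_size) {
    assert(space->top + group_size <= space->limit && "would need a GC");
    uintptr_t result = space->top;
    space->top += group_size;
    return result;
  }

  // Folded allocation: no limit check, just carve out of the reservation.
  uintptr_t FastAllocate(uintptr_t* reservation, uintptr_t size) {
    uintptr_t result = *reservation;
    *reservation += size;
    return result;
  }
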
diff --git a/src/crankshaft/s390/lithium-codegen-s390.h b/src/crankshaft/s390/lithium-codegen-s390.h
index 6d364cb..7721b30 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.h
+++ b/src/crankshaft/s390/lithium-codegen-s390.h
@@ -127,8 +127,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   Scope* scope() const { return scope_; }
 
   Register scratch0() { return kLithiumScratch; }
diff --git a/src/crankshaft/s390/lithium-s390.cc b/src/crankshaft/s390/lithium-s390.cc
index a18f877..fbc1970 100644
--- a/src/crankshaft/s390/lithium-s390.cc
+++ b/src/crankshaft/s390/lithium-s390.cc
@@ -815,7 +815,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -865,16 +865,6 @@
   return DefineAsRegister(new (zone()) LArgumentsElements);
 }
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), cp);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -2124,13 +2114,18 @@
 }
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
   LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
-  return AssignPointerMap(DefineAsRegister(result));
+  if (instr->IsAllocationFolded()) {
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
 
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
diff --git a/src/crankshaft/s390/lithium-s390.h b/src/crankshaft/s390/lithium-s390.h
index b6a1614..407d45d 100644
--- a/src/crankshaft/s390/lithium-s390.h
+++ b/src/crankshaft/s390/lithium-s390.h
@@ -67,6 +67,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -78,7 +79,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -1035,21 +1035,6 @@
   Token::Value op() const { return hydrogen()->token(); }
 };
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -2165,6 +2150,22 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* size() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
diff --git a/src/crankshaft/typing.cc b/src/crankshaft/typing.cc
index 69d7efe..9bd09ac 100644
--- a/src/crankshaft/typing.cc
+++ b/src/crankshaft/typing.cc
@@ -14,9 +14,9 @@
 namespace v8 {
 namespace internal {
 
-
 AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
-                   Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root)
+                   Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root,
+                   AstTypeBounds* bounds)
     : isolate_(isolate),
       zone_(zone),
       closure_(closure),
@@ -26,7 +26,8 @@
       oracle_(isolate, zone, handle(closure->shared()->code()),
               handle(closure->shared()->feedback_vector()),
               handle(closure->context()->native_context())),
-      store_(zone) {
+      store_(zone),
+      bounds_(bounds) {
   InitializeAstVisitor(isolate);
 }
 
@@ -304,14 +305,7 @@
   store_.Forget();  // Control may transfer here via 'break'.
 }
 
-
-void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
-  RECURSE(Visit(stmt->iterable()));
-  store_.Forget();  // Control may transfer here via looping or 'continue'.
-  RECURSE(Visit(stmt->body()));
-  store_.Forget();  // Control may transfer here via 'break'.
-}
-
+void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {}
 
 void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
   Effects try_effects = EnterEffects();
@@ -353,7 +347,7 @@
 void AstTyper::VisitDoExpression(DoExpression* expr) {
   RECURSE(VisitBlock(expr->block()));
   RECURSE(VisitVariableProxy(expr->result()));
-  NarrowType(expr, expr->result()->bounds());
+  NarrowType(expr, bounds_->get(expr->result()));
 }
 
 
@@ -371,9 +365,9 @@
   then_effects.Alt(else_effects);
   store_.Seq(then_effects);
 
-  NarrowType(expr, Bounds::Either(
-      expr->then_expression()->bounds(),
-      expr->else_expression()->bounds(), zone()));
+  NarrowType(expr,
+             Bounds::Either(bounds_->get(expr->then_expression()),
+                            bounds_->get(expr->else_expression()), zone()));
 }
 
 
@@ -464,11 +458,11 @@
       expr->is_compound() ? expr->binary_operation() : expr->value();
   RECURSE(Visit(expr->target()));
   RECURSE(Visit(rhs));
-  NarrowType(expr, rhs->bounds());
+  NarrowType(expr, bounds_->get(rhs));
 
   VariableProxy* proxy = expr->target()->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsStackAllocated()) {
-    store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+    store_.Seq(variable_index(proxy->var()), Effect(bounds_->get(expr)));
   }
 }
 
@@ -628,7 +622,7 @@
 
   VariableProxy* proxy = expr->expression()->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsStackAllocated()) {
-    store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+    store_.Seq(variable_index(proxy->var()), Effect(bounds_->get(expr)));
   }
 }
 
@@ -656,7 +650,7 @@
     case Token::COMMA:
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      NarrowType(expr, expr->right()->bounds());
+      NarrowType(expr, bounds_->get(expr->right()));
       break;
     case Token::OR:
     case Token::AND: {
@@ -669,16 +663,16 @@
       left_effects.Alt(right_effects);
       store_.Seq(left_effects);
 
-      NarrowType(expr, Bounds::Either(
-          expr->left()->bounds(), expr->right()->bounds(), zone()));
+      NarrowType(expr, Bounds::Either(bounds_->get(expr->left()),
+                                      bounds_->get(expr->right()), zone()));
       break;
     }
     case Token::BIT_OR:
     case Token::BIT_AND: {
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      Type* upper = Type::Union(
-          expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
+      Type* upper = Type::Union(bounds_->get(expr->left()).upper,
+                                bounds_->get(expr->right()).upper, zone());
       if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
       Type* lower = Type::Intersect(Type::SignedSmall(), upper, zone());
       NarrowType(expr, Bounds(lower, upper));
@@ -702,8 +696,8 @@
     case Token::ADD: {
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      Bounds l = expr->left()->bounds();
-      Bounds r = expr->right()->bounds();
+      Bounds l = bounds_->get(expr->left());
+      Bounds r = bounds_->get(expr->right());
       Type* lower =
           !l.lower->IsInhabited() || !r.lower->IsInhabited()
               ? Type::None()
diff --git a/src/crankshaft/typing.h b/src/crankshaft/typing.h
index 40b538a..27b6809 100644
--- a/src/crankshaft/typing.h
+++ b/src/crankshaft/typing.h
@@ -6,6 +6,7 @@
 #define V8_CRANKSHAFT_TYPING_H_
 
 #include "src/allocation.h"
+#include "src/ast/ast-type-bounds.h"
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/effects.h"
@@ -16,11 +17,11 @@
 namespace v8 {
 namespace internal {
 
-
 class AstTyper: public AstVisitor {
  public:
   AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
-           Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root);
+           Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root,
+           AstTypeBounds* bounds);
   void Run();
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -41,15 +42,16 @@
   FunctionLiteral* root_;
   TypeFeedbackOracle oracle_;
   Store store_;
+  AstTypeBounds* bounds_;
 
   Zone* zone() const { return zone_; }
   TypeFeedbackOracle* oracle() { return &oracle_; }
 
   void NarrowType(Expression* e, Bounds b) {
-    e->set_bounds(Bounds::Both(e->bounds(), b, zone()));
+    bounds_->set(e, Bounds::Both(bounds_->get(e), b, zone()));
   }
   void NarrowLowerType(Expression* e, Type* t) {
-    e->set_bounds(Bounds::NarrowLower(e->bounds(), t, zone()));
+    bounds_->set(e, Bounds::NarrowLower(bounds_->get(e), t, zone()));
   }
 
   Effects EnterEffects() {
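
The typing changes above move per-expression bounds off the AST nodes (set_bounds/bounds) into an AstTypeBounds side table owned by the caller. A rough sketch of such a side table, assuming a plain hash map where the real class presumably uses zone-allocated storage:

  #include <unordered_map>

  struct Expression;  // AST node, opaque here.
  struct Bounds {};   // Lower/upper type pair, elided.

  // Side table keyed by node pointer: the AST stays untouched while the typer
  // records bounds externally, so they can be discarded along with the typer.
  class AstTypeBoundsSketch {
   public:
    Bounds get(Expression* e) const {
      auto it = map_.find(e);
      return it == map_.end() ? Bounds{} : it->second;
    }
    void set(Expression* e, Bounds b) { map_[e] = b; }

   private:
    std::unordered_map<Expression*, Bounds> map_;
  };
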
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
index fbda59b..350543e 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -168,11 +168,11 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in rdi.
-    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ Push(rdi);
@@ -299,8 +299,6 @@
       }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
-    info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                 table_entry->deopt_info.inlining_id);
   }
 
   if (needs_frame.is_linked()) {
@@ -748,7 +746,7 @@
     __ bind(&done);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
@@ -757,7 +755,6 @@
       !info()->saves_caller_doubles()) {
     DeoptComment(deopt_info);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
@@ -2441,16 +2438,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(rax));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -3287,8 +3274,19 @@
   DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
 
   Label slow, allocated, done;
-  Register tmp = input_reg.is(rax) ? rcx : rax;
-  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+  uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
+  available_regs &= ~input_reg.bit();
+  if (instr->context()->IsRegister()) {
+    // Make sure that the context isn't overwritten in the AllocateHeapNumber
+    // macro below.
+    available_regs &= ~ToRegister(instr->context()).bit();
+  }
+
+  Register tmp =
+      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+  available_regs &= ~tmp.bit();
+  Register tmp2 =
+      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
 
   // Preserve the value of all registers.
   PushSafepointRegistersScope scope(this);
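
The rewritten scratch-register selection above replaces nested ternaries with a free-set bitmask: clear the bits that must be preserved, then take the lowest set bit, twice. The same trick in isolation, with a portable loop standing in for base::bits::CountTrailingZeros32:

  #include <cstdint>
  #include <cstdio>

  // Portable stand-in for base::bits::CountTrailingZeros32.
  // Precondition: x != 0 (callers guarantee at least two free registers).
  int CountTrailingZeros32(uint32_t x) {
    int n = 0;
    while ((x & 1) == 0) { x >>= 1; ++n; }
    return n;
  }

  int main() {
    uint32_t available = 0xF;  // bits 0..3 model rax, rcx, rdx, rbx
    available &= ~(1u << 0);   // input lives in rax: exclude it
    int tmp = CountTrailingZeros32(available);   // bit 1 (rcx)
    available &= ~(1u << tmp);
    int tmp2 = CountTrailingZeros32(available);  // bit 2 (rdx)
    printf("tmp=bit %d, tmp2=bit %d\n", tmp, tmp2);
  }
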
@@ -5162,7 +5160,7 @@
   Register temp = ToRegister(instr->temp());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -5171,6 +5169,11 @@
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
 
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5200,6 +5203,29 @@
   }
 }
 
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, temp, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, temp, flags);
+  }
+}
 
 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   Register result = ToRegister(instr->result());
@@ -5232,6 +5258,22 @@
   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, rax);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags =
+          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+    }
+    // If the allocation folding dominator's allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    __ subp(rax, Immediate(kHeapObjectTag));
+    __ Store(allocation_top, rax);
+    __ addp(rax, Immediate(kHeapObjectTag));
+  }
 }
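
The subp/Store/addp dance above exists because result registers hold tagged pointers (address plus kHeapObjectTag) while the allocation-top external reference stores raw addresses. A sketch of the rewind, assuming kHeapObjectTag == 1 as on V8's supported targets:

  #include <cstdint>

  constexpr uintptr_t kHeapObjectTag = 1;  // low bit marks a heap pointer

  // Rewind the allocation top to "unallocate" the object whose tagged address
  // is in `tagged_result`, mirroring the subp/Store/addp sequence above.
  void RewindAllocationTop(uintptr_t* allocation_top,
                           uintptr_t tagged_result) {
    *allocation_top = tagged_result - kHeapObjectTag;  // untag, reset top
    // The register is then re-tagged so callers still see a heap pointer.
  }
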
 
 
diff --git a/src/crankshaft/x64/lithium-codegen-x64.h b/src/crankshaft/x64/lithium-codegen-x64.h
index 139645e..f643e2b 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/src/crankshaft/x64/lithium-codegen-x64.h
@@ -111,8 +111,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   LPlatformChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
   HGraph* graph() const { return chunk()->graph(); }
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
index e86b90c..daedd72 100644
--- a/src/crankshaft/x64/lithium-x64.cc
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -907,7 +907,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -957,17 +957,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -2408,14 +2397,19 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
-  LOperand* size = instr->size()->IsConstant()
-      ? UseConstant(instr->size())
-      : UseTempRegister(instr->size());
-  LOperand* temp = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(context, size, temp);
-  return AssignPointerMap(DefineAsRegister(result));
+  LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
+                                               : UseRegister(instr->size());
+  if (instr->IsAllocationFolded()) {
+    LOperand* temp = TempRegister();
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LOperand* temp = TempRegister();
+    LAllocate* result = new (zone()) LAllocate(context, size, temp);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
 
 
diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h
index 1feba4b..91f5541 100644
--- a/src/crankshaft/x64/lithium-x64.h
+++ b/src/crankshaft/x64/lithium-x64.h
@@ -67,6 +67,7 @@
   V(Drop)                                    \
   V(DummyUse)                                \
   V(Dummy)                                   \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -78,7 +79,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -1136,22 +1136,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* left() { return inputs_[1]; }
-  LOperand* right() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -2385,6 +2369,19 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp) {
+    inputs_[0] = size;
+    temps_[0] = temp;
+  }
+
+  LOperand* size() const { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
 
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
index 1751199..641a87a 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -135,11 +135,11 @@
   Comment(";;; Prologue begin");
 
   // Possibly allocate a local context.
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     Comment(";;; Allocate local context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
-    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
     if (info()->scope()->is_script_scope()) {
       __ push(edi);
@@ -201,6 +201,13 @@
 
   osr_pc_offset_ = masm()->pc_offset();
 
+  // The interpreter is the first-tier compiler now. It runs code generated
+  // by the TurboFan compiler, which always puts "1" on the x87 FPU stack.
+  // This behavior affects Crankshaft's x87 FPU stack depth check in debug
+  // mode, so the FPU stack needs to be reset here for this scenario.
+  __ fninit();
+
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
@@ -268,8 +275,6 @@
     } else {
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
-    info()->LogDeoptCallPosition(masm()->pc_offset(),
-                                 table_entry->deopt_info.inlining_id);
   }
   if (needs_frame.is_linked()) {
     __ bind(&needs_frame);
@@ -1009,13 +1014,12 @@
     __ bind(&done);
   }
 
-  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
 
   DCHECK(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
     DeoptComment(deopt_info);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
-    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
   } else {
     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                             !frame_is_built_);
@@ -2572,16 +2576,6 @@
 }
 
 
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
-  DCHECK(ToRegister(instr->result()).is(eax));
-  InstanceOfStub stub(isolate());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoHasInPrototypeChainAndBranch(
     LHasInPrototypeChainAndBranch* instr) {
   Register const object = ToRegister(instr->object());
@@ -2999,9 +2993,9 @@
     __ j(not_equal, &done);
     if (info()->IsStub()) {
       // A stub can safely convert the hole to undefined only if the array
-      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
-      // it needs to bail out.
-      __ mov(result, isolate()->factory()->array_protector());
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+      // Otherwise it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
              Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
       DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
@@ -3362,8 +3356,19 @@
   DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
 
   Label slow, allocated, done;
-  Register tmp = input_reg.is(eax) ? ecx : eax;
-  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+  uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
+  available_regs &= ~input_reg.bit();
+  if (instr->context()->IsRegister()) {
+    // Make sure that the context isn't overwritten in the AllocateHeapNumber
+    // macro below.
+    available_regs &= ~ToRegister(instr->context()).bit();
+  }
+
+  Register tmp =
+      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
+  available_regs &= ~tmp.bit();
+  Register tmp2 =
+      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
 
   // Preserve the value of all registers.
   PushSafepointRegistersScope scope(this);
@@ -4364,7 +4369,15 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      __ mov(ebx, ToImmediate(key, Representation::Smi()));
+      LConstantOperand* constant_key = LConstantOperand::cast(key);
+      int32_t int_key = ToInteger32(constant_key);
+      if (Smi::IsValid(int_key)) {
+        __ mov(ebx, Immediate(Smi::FromInt(int_key)));
+      } else {
+        // We should never get here at runtime because there is a smi check on
+        // the key before this point.
+        __ int3();
+      }
     } else {
       __ Move(ebx, ToRegister(key));
       __ SmiTag(ebx);
@@ -5396,7 +5409,7 @@
   Register temp = ToRegister(instr->temp());
 
   // Allocate memory for the object.
-  AllocationFlags flags = TAG_OBJECT;
+  AllocationFlags flags = NO_ALLOCATION_FLAGS;
   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
@@ -5405,6 +5418,11 @@
     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   }
 
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+  }
+  DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
   if (instr->size()->IsConstantOperand()) {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
     CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5434,6 +5452,29 @@
   }
 }
 
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+  DCHECK(instr->hydrogen()->IsAllocationFolded());
+  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  AllocationFlags flags = ALLOCATION_FOLDED;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ FastAllocate(size, result, temp, flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ FastAllocate(size, result, temp, flags);
+  }
+}
 
 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   Register result = ToRegister(instr->result());
@@ -5473,6 +5514,22 @@
   CallRuntimeFromDeferred(
       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
+
+  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+    if (instr->hydrogen()->IsOldSpaceAllocation()) {
+      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags =
+          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+    }
+    // If the allocation folding dominator's allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+    ExternalReference allocation_top =
+        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+    __ sub(eax, Immediate(kHeapObjectTag));
+    __ mov(Operand::StaticVariable(allocation_top), eax);
+    __ add(eax, Immediate(kHeapObjectTag));
+  }
 }
 
 
diff --git a/src/crankshaft/x87/lithium-codegen-x87.h b/src/crankshaft/x87/lithium-codegen-x87.h
index 3719236..f979cf0 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/src/crankshaft/x87/lithium-codegen-x87.h
@@ -150,8 +150,6 @@
 #undef DECLARE_DO
 
  private:
-  LanguageMode language_mode() const { return info()->language_mode(); }
-
   Scope* scope() const { return scope_; }
 
   void EmitClassOfTest(Label* if_true,
diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc
index 163d2c9..7df70ae 100644
--- a/src/crankshaft/x87/lithium-x87.cc
+++ b/src/crankshaft/x87/lithium-x87.cc
@@ -925,7 +925,7 @@
 
 LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
   LInstruction* result = new (zone()) LPrologue();
-  if (info_->num_heap_slots() > 0) {
+  if (info_->scope()->num_heap_slots() > 0) {
     result = MarkAsCall(result, instr);
   }
   return result;
@@ -983,17 +983,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LOperand* left =
-      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
-  LOperand* right =
-      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
-  LOperand* context = UseFixed(instr->context(), esi);
-  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
     HHasInPrototypeChainAndBranch* instr) {
   LOperand* object = UseRegister(instr->object());
@@ -2399,14 +2388,19 @@
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
-  info()->MarkAsDeferredCalling();
-  LOperand* context = UseAny(instr->context());
-  LOperand* size = instr->size()->IsConstant()
-      ? UseConstant(instr->size())
-      : UseTempRegister(instr->size());
-  LOperand* temp = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(context, size, temp);
-  return AssignPointerMap(DefineAsRegister(result));
+  LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
+                                               : UseRegister(instr->size());
+  if (instr->IsAllocationFolded()) {
+    LOperand* temp = TempRegister();
+    LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
+    return DefineAsRegister(result);
+  } else {
+    info()->MarkAsDeferredCalling();
+    LOperand* context = UseAny(instr->context());
+    LOperand* temp = TempRegister();
+    LAllocate* result = new (zone()) LAllocate(context, size, temp);
+    return AssignPointerMap(DefineAsRegister(result));
+  }
 }
 
 
diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h
index d83322a..98703ae 100644
--- a/src/crankshaft/x87/lithium-x87.h
+++ b/src/crankshaft/x87/lithium-x87.h
@@ -72,6 +72,7 @@
   V(Drop)                                    \
   V(Dummy)                                   \
   V(DummyUse)                                \
+  V(FastAllocate)                            \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
   V(FlooringDivI)                            \
@@ -83,7 +84,6 @@
   V(HasInPrototypeChainAndBranch)            \
   V(HasInstanceTypeAndBranch)                \
   V(InnerAllocatedObject)                    \
-  V(InstanceOf)                              \
   V(InstructionGap)                          \
   V(Integer32ToDouble)                       \
   V(InvokeFunction)                          \
@@ -153,7 +153,6 @@
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
   Opcode opcode() const final { return LInstruction::k##type; } \
   void CompileToNative(LCodeGen* generator) final;              \
@@ -1134,22 +1133,6 @@
 };
 
 
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
-  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  LOperand* context() const { return inputs_[0]; }
-  LOperand* left() const { return inputs_[1]; }
-  LOperand* right() const { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
 class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
  public:
   LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
@@ -2398,6 +2381,19 @@
   DECLARE_HYDROGEN_ACCESSOR(Allocate)
 };
 
+class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LFastAllocate(LOperand* size, LOperand* temp) {
+    inputs_[0] = size;
+    temps_[0] = temp;
+  }
+
+  LOperand* size() const { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
 
 class LTypeof final : public LTemplateInstruction<1, 2, 0> {
  public:
diff --git a/src/d8.cc b/src/d8.cc
index 0688380..9466ab7 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -19,6 +19,7 @@
 
 #ifndef V8_SHARED
 #include <algorithm>
+#include <fstream>
 #include <vector>
 #endif  // !V8_SHARED
 
@@ -41,6 +42,7 @@
 #include "src/base/platform/platform.h"
 #include "src/base/sys-info.h"
 #include "src/basic-block-profiler.h"
+#include "src/interpreter/interpreter.h"
 #include "src/snapshot/natives.h"
 #include "src/utils.h"
 #include "src/v8.h"
@@ -1275,6 +1277,21 @@
 inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
   return strcmp(lhs.key, rhs.key) < 0;
 }
+
+void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
+  HandleScope handle_scope(isolate);
+  Local<Context> context = Context::New(isolate);
+  Context::Scope context_scope(context);
+
+  Local<Object> dispatch_counters = reinterpret_cast<i::Isolate*>(isolate)
+                                        ->interpreter()
+                                        ->GetDispatchCountersObject();
+  std::ofstream dispatch_counters_stream(
+      i::FLAG_trace_ignition_dispatches_output_file);
+  dispatch_counters_stream << *String::Utf8Value(
+      JSON::Stringify(context, dispatch_counters).ToLocalChecked());
+}
+
 #endif  // !V8_SHARED
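
WriteIgnitionDispatchCountersFile serializes the interpreter's dispatch counters as JSON through a std::ofstream; it only runs when d8 is started with --ignition, --trace-ignition-dispatches and an output file, per the guard later in this diff. A dependency-free sketch of the output step, hand-rolling JSON for an assumed from-bytecode/to-bytecode counter shape instead of going through V8's JSON::Stringify:

  #include <fstream>
  #include <map>
  #include <string>

  // Write {"Ldar":{"Star":7}}-style nested counters; the nesting mirrors the
  // assumed shape of Interpreter::GetDispatchCountersObject().
  void WriteCounters(
      const std::map<std::string, std::map<std::string, int>>& counters,
      const char* path) {
    std::ofstream out(path);
    out << "{";
    bool first_row = true;
    for (const auto& row : counters) {
      out << (first_row ? "" : ",") << "\"" << row.first << "\":{";
      bool first_cell = true;
      for (const auto& cell : row.second) {
        out << (first_cell ? "" : ",") << "\"" << cell.first
            << "\":" << cell.second;
        first_cell = false;
      }
      out << "}";
      first_row = false;
    }
    out << "}";
  }
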
 
 
@@ -1312,6 +1329,7 @@
            "-------------+\n");
     delete [] counters;
   }
+
   delete counters_file_;
   delete counter_map_;
 #endif  // !V8_SHARED
@@ -2476,6 +2494,13 @@
       RunShell(isolate);
     }
 
+#ifndef V8_SHARED
+    if (i::FLAG_ignition && i::FLAG_trace_ignition_dispatches &&
+        i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
+      WriteIgnitionDispatchCountersFile(isolate);
+    }
+#endif
+
     // Shut down contexts and collect garbage.
     evaluation_context_.Reset();
 #ifndef V8_SHARED
diff --git a/src/d8.gyp b/src/d8.gyp
index f249a78..e60b01f 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -31,16 +31,15 @@
     # Enable support for Intel VTune. Supported on ia32/x64 only
     'v8_enable_vtunejit%': 0,
     'v8_enable_i18n_support%': 1,
-    'v8_toolset_for_d8%': 'target',
   },
-  'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
+  'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
   'targets': [
     {
       'target_name': 'd8',
       'type': 'executable',
       'dependencies': [
-        '../tools/gyp/v8.gyp:v8',
-        '../tools/gyp/v8.gyp:v8_libplatform',
+        'v8.gyp:v8',
+        'v8.gyp:v8_libplatform',
       ],
       # Generated source files need this explicitly:
       'include_dirs+': [
@@ -52,7 +51,7 @@
       ],
       'conditions': [
         [ 'want_separate_host_toolset==1', {
-          'toolsets': [ '<(v8_toolset_for_d8)', ],
+          'toolsets': [ 'target', ],
         }],
         ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
            or OS=="openbsd" or OS=="solaris" or OS=="android" \
@@ -141,7 +140,7 @@
     },
   ],
   'conditions': [
-    ['test_isolation_mode != "noop" and v8_toolset_for_d8 == "target"', {
+    ['test_isolation_mode != "noop"', {
       'targets': [
         {
           'target_name': 'd8_run',
@@ -150,7 +149,7 @@
             'd8',
           ],
           'includes': [
-            '../build/isolate.gypi',
+            '../gypfiles/isolate.gypi',
           ],
           'sources': [
             'd8.isolate',
diff --git a/src/d8.h b/src/d8.h
index 321d9c1..e51e8ee 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -461,6 +461,7 @@
   static i::List<Worker*> workers_;
   static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;
 
+  static void WriteIgnitionDispatchCountersFile(v8::Isolate* isolate);
   static Counter* GetCounter(const char* name, bool is_histogram);
   static Local<String> Stringify(Isolate* isolate, Local<Value> value);
 #endif  // !V8_SHARED
diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc
index 1729408..d5ebaa5 100644
--- a/src/debug/debug-evaluate.cc
+++ b/src/debug/debug-evaluate.cc
@@ -95,11 +95,12 @@
   }
 
   Handle<JSFunction> eval_fun;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, eval_fun,
-                             Compiler::GetFunctionFromEval(
-                                 source, outer_info, context, SLOPPY,
-                                 NO_PARSE_RESTRICTION, RelocInfo::kNoPosition),
-                             Object);
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, eval_fun,
+      Compiler::GetFunctionFromEval(
+          source, outer_info, context, SLOPPY, NO_PARSE_RESTRICTION,
+          RelocInfo::kNoPosition, RelocInfo::kNoPosition),
+      Object);
 
   Handle<Object> result;
   ASSIGN_RETURN_ON_EXCEPTION(
@@ -182,7 +183,8 @@
         context_chain_element.wrapped_context = current_context;
       }
       context_chain_.Add(context_chain_element);
-    } else if (scope_type == ScopeIterator::ScopeTypeBlock) {
+    } else if (scope_type == ScopeIterator::ScopeTypeBlock ||
+               scope_type == ScopeIterator::ScopeTypeEval) {
       Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
       frame_inspector.MaterializeStackLocals(materialized,
                                              it.CurrentScopeInfo());
@@ -247,7 +249,8 @@
    // 'this' is allocated in an outer context and is already being
     // referenced by the current function, so it can be correctly resolved.
     return;
-  } else if (local_function->shared()->scope_info()->HasReceiver()) {
+  } else if (local_function->shared()->scope_info()->HasReceiver() &&
+             !frame_->receiver()->IsTheHole()) {
     recv = handle(frame_->receiver(), isolate_);
   }
   JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
diff --git a/src/debug/debug-frames.cc b/src/debug/debug-frames.cc
index a7956ff..453a77d 100644
--- a/src/debug/debug-frames.cc
+++ b/src/debug/debug-frames.cc
@@ -72,8 +72,7 @@
     return deoptimized_frame_->GetSourcePosition();
   } else if (is_interpreted_) {
     InterpretedFrame* frame = reinterpret_cast<InterpretedFrame*>(frame_);
-    BytecodeArray* bytecode_array =
-        frame->function()->shared()->bytecode_array();
+    BytecodeArray* bytecode_array = frame->GetBytecodeArray();
     return bytecode_array->SourcePosition(frame->GetBytecodeOffset());
   } else {
     Code* code = frame_->LookupCode();
@@ -117,6 +116,7 @@
     // TODO(yangguo): check whether this is necessary, now that we materialize
     //                context locals as well.
     Handle<String> name(scope_info->ParameterName(i));
+    if (ScopeInfo::VariableIsSynthetic(*name)) continue;
     if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
 
     Handle<Object> value =
@@ -130,8 +130,8 @@
 
   // Second fill all stack locals.
   for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
-    if (scope_info->LocalIsSynthetic(i)) continue;
     Handle<String> name(scope_info->StackLocalName(i));
+    if (ScopeInfo::VariableIsSynthetic(*name)) continue;
     Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
     // TODO(yangguo): We convert optimized out values to {undefined} when they
     // are passed to the debugger. Eventually we should handle them somehow.
@@ -163,6 +163,7 @@
   for (int i = 0; i < scope_info->ParameterCount(); ++i) {
     // Shadowed parameters were not materialized.
     Handle<String> name(scope_info->ParameterName(i));
+    if (ScopeInfo::VariableIsSynthetic(*name)) continue;
     if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
 
     DCHECK(!frame_->GetParameter(i)->IsTheHole());
@@ -173,13 +174,12 @@
 
   // Stack locals.
   for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
-    if (scope_info->LocalIsSynthetic(i)) continue;
+    Handle<String> name(scope_info->StackLocalName(i));
+    if (ScopeInfo::VariableIsSynthetic(*name)) continue;
     int index = scope_info->StackLocalIndex(i);
     if (frame_->GetExpression(index)->IsTheHole()) continue;
     Handle<Object> value =
-        Object::GetPropertyOrElement(
-            target, handle(scope_info->StackLocalName(i), isolate_))
-            .ToHandleChecked();
+        Object::GetPropertyOrElement(target, name).ToHandleChecked();
     frame_->SetExpression(index, *value);
   }
 }
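
The materialization loops above now filter on ScopeInfo::VariableIsSynthetic applied to the name, for parameters and stack locals alike, instead of the index-based LocalIsSynthetic. The intent is that compiler-introduced variables carry names user code cannot declare; a sketch of such a predicate (an assumed heuristic, not necessarily V8's exact rule):

  #include <string>

  // Assumed heuristic: synthetic (compiler-introduced) variables are unnamed
  // or start with '.', e.g. ".result", which no user program can declare.
  bool VariableIsSynthetic(const std::string& name) {
    return name.empty() || name[0] == '.';
  }
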
diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc
index d9c615b..1602711 100644
--- a/src/debug/debug-scopes.cc
+++ b/src/debug/debug-scopes.cc
@@ -5,6 +5,7 @@
 #include "src/debug/debug-scopes.h"
 
 #include "src/ast/scopes.h"
+#include "src/compiler.h"
 #include "src/debug/debug.h"
 #include "src/frames-inl.h"
 #include "src/globals.h"
@@ -80,34 +81,29 @@
   }
 
   // Reparse the code and analyze the scopes.
-  Scope* scope = NULL;
   // Check whether we are in global, eval or function code.
   Zone zone(isolate->allocator());
+  base::SmartPointer<ParseInfo> info;
   if (scope_info->scope_type() != FUNCTION_SCOPE) {
     // Global or eval code.
     Handle<Script> script(Script::cast(shared_info->script()));
-    ParseInfo info(&zone, script);
+    info.Reset(new ParseInfo(&zone, script));
+    info->set_toplevel();
     if (scope_info->scope_type() == SCRIPT_SCOPE) {
-      info.set_global();
+      info->set_global();
     } else {
       DCHECK(scope_info->scope_type() == EVAL_SCOPE);
-      info.set_eval();
-      info.set_context(Handle<Context>(function->context()));
+      info->set_eval();
+      info->set_context(Handle<Context>(function->context()));
     }
-    if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
-      scope = info.literal()->scope();
-    }
-    if (!ignore_nested_scopes) RetrieveScopeChain(scope);
-    if (collect_non_locals) CollectNonLocals(scope);
   } else {
-    // Function code
-    ParseInfo info(&zone, function);
-    if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
-      scope = info.literal()->scope();
-    }
-    if (!ignore_nested_scopes) RetrieveScopeChain(scope);
-    if (collect_non_locals) CollectNonLocals(scope);
+    // Inner function.
+    info.Reset(new ParseInfo(&zone, function));
   }
+  Scope* scope = NULL;
+  if (Compiler::ParseAndAnalyze(info.get())) scope = info->literal()->scope();
+  if (!ignore_nested_scopes) RetrieveScopeChain(scope);
+  if (collect_non_locals) CollectNonLocals(scope);
   UnwrapEvaluationContext();
 }
 
@@ -126,8 +122,6 @@
   while (true) {
     if (context_.is_null()) return;
     if (!context_->IsDebugEvaluateContext()) return;
-    // An existing debug-evaluate context can only be outside the local scope.
-    DCHECK(nested_scope_chain_.is_empty());
     Handle<Object> wrapped(context_->get(Context::WRAPPED_CONTEXT_INDEX),
                            isolate_);
     if (wrapped->IsContext()) {
@@ -201,11 +195,15 @@
   } else if (nested_scope_chain_.is_empty()) {
     context_ = Handle<Context>(context_->previous(), isolate_);
   } else {
-    if (nested_scope_chain_.last().scope_info->HasContext()) {
-      DCHECK(context_->previous() != NULL);
-      context_ = Handle<Context>(context_->previous(), isolate_);
-    }
-    nested_scope_chain_.RemoveLast();
+    do {
+      if (nested_scope_chain_.last().scope_info->HasContext()) {
+        DCHECK(context_->previous() != NULL);
+        context_ = Handle<Context>(context_->previous(), isolate_);
+      }
+      nested_scope_chain_.RemoveLast();
+      if (nested_scope_chain_.is_empty()) break;
+      // Repeat to skip hidden scopes.
+    } while (nested_scope_chain_.last().is_hidden());
   }
   UnwrapEvaluationContext();
 }
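
The new do/while above keeps popping nested scope entries until a non-hidden one is on top, so a run of hidden scopes collapses into a single Next() step, while the context chain is advanced for every popped scope that owned a context. The control flow in isolation, as a shape-only sketch:

  #include <vector>

  struct ScopeEntry {
    bool has_context;
    bool hidden;
    bool is_hidden() const { return hidden; }
  };

  // Pop the current scope plus any hidden scopes beneath the next visible
  // one; drop a context level whenever a popped scope owned a context.
  void Next(std::vector<ScopeEntry>* chain, int* context_depth) {
    do {
      if (chain->back().has_context) --*context_depth;  // context_->previous()
      chain->pop_back();
      if (chain->empty()) break;
    } while (chain->back().is_hidden());
  }
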
@@ -236,8 +234,10 @@
         DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
         return ScopeTypeBlock;
       case EVAL_SCOPE:
-        UNREACHABLE();
+        DCHECK(!scope_info->HasContext() || context_->IsFunctionContext());
+        return ScopeTypeEval;
     }
+    UNREACHABLE();
   }
   if (context_->IsNativeContext()) {
     DCHECK(context_->global_object()->IsJSGlobalObject());
@@ -284,7 +284,8 @@
       // Materialize the content of the closure scope into a JSObject.
       return MaterializeClosure();
     case ScopeIterator::ScopeTypeBlock:
-      return MaterializeBlockScope();
+    case ScopeIterator::ScopeTypeEval:
+      return MaterializeInnerScope();
     case ScopeIterator::ScopeTypeModule:
       return MaterializeModuleScope();
   }
@@ -295,7 +296,8 @@
 
 bool ScopeIterator::HasContext() {
   ScopeType type = Type();
-  if (type == ScopeTypeBlock || type == ScopeTypeLocal) {
+  if (type == ScopeTypeBlock || type == ScopeTypeLocal ||
+      type == ScopeTypeEval) {
     if (!nested_scope_chain_.is_empty()) {
       return nested_scope_chain_.last().scope_info->HasContext();
     }
@@ -321,7 +323,8 @@
     case ScopeIterator::ScopeTypeScript:
       return SetScriptVariableValue(variable_name, new_value);
     case ScopeIterator::ScopeTypeBlock:
-      return SetBlockVariableValue(variable_name, new_value);
+    case ScopeIterator::ScopeTypeEval:
+      return SetInnerScopeVariableValue(variable_name, new_value);
     case ScopeIterator::ScopeTypeModule:
       // TODO(2399): should we implement it?
       break;
@@ -453,7 +456,7 @@
       global->native_context()->script_context_table());
 
   Handle<JSObject> script_scope =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
+      isolate_->factory()->NewJSObjectWithNullProto();
 
   for (int context_index = 0; context_index < script_contexts->used();
        context_index++) {
@@ -470,7 +473,7 @@
   Handle<JSFunction> function = GetFunction();
 
   Handle<JSObject> local_scope =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
+      isolate_->factory()->NewJSObjectWithNullProto();
   frame_inspector_->MaterializeStackLocals(local_scope, function);
 
   Handle<Context> frame_context =
@@ -482,19 +485,16 @@
 
   if (!scope_info->HasContext()) return local_scope;
 
-  // Third fill all context locals.
+  // Fill all context locals.
   Handle<Context> function_context(frame_context->closure_context());
   CopyContextLocalsToScopeObject(scope_info, function_context, local_scope);
 
   // Finally copy any properties from the function context extension.
   // These will be variables introduced by eval.
   if (function_context->closure() == *function &&
-      function_context->has_extension() &&
       !function_context->IsNativeContext()) {
-    bool success = CopyContextExtensionToScopeObject(
-        handle(function_context->extension_object(), isolate_), local_scope,
-        INCLUDE_PROTOS);
-    if (!success) return MaybeHandle<JSObject>();
+    CopyContextExtensionToScopeObject(function_context, local_scope,
+                                      INCLUDE_PROTOS);
   }
 
   return local_scope;
@@ -513,19 +513,14 @@
   // Allocate and initialize a JSObject with all the content of this function
   // closure.
   Handle<JSObject> closure_scope =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
+      isolate_->factory()->NewJSObjectWithNullProto();
 
   // Fill all context locals to the context extension.
   CopyContextLocalsToScopeObject(scope_info, context, closure_scope);
 
   // Finally copy any properties from the function context extension. This will
   // be variables introduced by eval.
-  if (context->has_extension()) {
-    bool success = CopyContextExtensionToScopeObject(
-        handle(context->extension_object(), isolate_), closure_scope, OWN_ONLY);
-    DCHECK(success);
-    USE(success);
-  }
+  CopyContextExtensionToScopeObject(context, closure_scope, OWN_ONLY);
 
   return closure_scope;
 }
@@ -540,7 +535,7 @@
   Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
                                isolate_);
   Handle<JSObject> catch_scope =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
+      isolate_->factory()->NewJSObjectWithNullProto();
   JSObject::SetOwnPropertyIgnoreAttributes(catch_scope, name, thrown_object,
                                            NONE)
       .Check();
@@ -560,14 +555,14 @@
 
 // Create a plain JSObject which materializes the block scope for the specified
 // block context.
-Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
-  Handle<JSObject> block_scope =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
+Handle<JSObject> ScopeIterator::MaterializeInnerScope() {
+  Handle<JSObject> inner_scope =
+      isolate_->factory()->NewJSObjectWithNullProto();
 
   Handle<Context> context = Handle<Context>::null();
   if (!nested_scope_chain_.is_empty()) {
     Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
-    frame_inspector_->MaterializeStackLocals(block_scope, scope_info);
+    frame_inspector_->MaterializeStackLocals(inner_scope, scope_info);
     if (scope_info->HasContext()) context = CurrentContext();
   } else {
     context = CurrentContext();
@@ -575,17 +570,10 @@
 
   if (!context.is_null()) {
     // Fill all context locals.
-    CopyContextLocalsToScopeObject(handle(context->scope_info()),
-                                   context, block_scope);
-    // Fill all extension variables.
-    if (context->extension_object() != nullptr) {
-      bool success = CopyContextExtensionToScopeObject(
-          handle(context->extension_object()), block_scope, OWN_ONLY);
-      DCHECK(success);
-      USE(success);
-    }
+    CopyContextLocalsToScopeObject(CurrentScopeInfo(), context, inner_scope);
+    CopyContextExtensionToScopeObject(context, inner_scope, OWN_ONLY);
   }
-  return block_scope;
+  return inner_scope;
 }
 
 
@@ -599,7 +587,7 @@
   // Allocate and initialize a JSObject with all the members of the debugged
   // module.
   Handle<JSObject> module_scope =
-      isolate_->factory()->NewJSObject(isolate_->object_function());
+      isolate_->factory()->NewJSObjectWithNullProto();
 
   // Fill all context locals.
   CopyContextLocalsToScopeObject(scope_info, context, module_scope);
@@ -607,12 +595,43 @@
   return module_scope;
 }
 
+bool ScopeIterator::SetParameterValue(Handle<ScopeInfo> scope_info,
+                                      JavaScriptFrame* frame,
+                                      Handle<String> parameter_name,
+                                      Handle<Object> new_value) {
+  // Setting stack locals of optimized frames is not supported.
+  if (frame->is_optimized()) return false;
+  HandleScope scope(isolate_);
+  for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+    if (String::Equals(handle(scope_info->ParameterName(i)), parameter_name)) {
+      frame->SetParameterValue(i, *new_value);
+      return true;
+    }
+  }
+  return false;
+}
 
-// Set the context local variable value.
-bool ScopeIterator::SetContextLocalValue(Handle<ScopeInfo> scope_info,
-                                         Handle<Context> context,
-                                         Handle<String> variable_name,
-                                         Handle<Object> new_value) {
+bool ScopeIterator::SetStackVariableValue(Handle<ScopeInfo> scope_info,
+                                          JavaScriptFrame* frame,
+                                          Handle<String> variable_name,
+                                          Handle<Object> new_value) {
+  // Setting stack locals of optimized frames is not supported.
+  if (frame->is_optimized()) return false;
+  HandleScope scope(isolate_);
+  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+    if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
+      frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool ScopeIterator::SetContextVariableValue(Handle<ScopeInfo> scope_info,
+                                            Handle<Context> context,
+                                            Handle<String> variable_name,
+                                            Handle<Object> new_value) {
+  HandleScope scope(isolate_);
   for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
     Handle<String> next_name(scope_info->ContextLocalName(i));
     if (String::Equals(variable_name, next_name)) {
@@ -626,128 +645,8 @@
     }
   }
 
-  return false;
-}
-
-
-bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
-                                          Handle<Object> new_value) {
-  JavaScriptFrame* frame = GetFrame();
-  // Optimized frames are not supported.
-  if (frame->is_optimized()) return false;
-
-  Handle<JSFunction> function(frame->function());
-  Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<ScopeInfo> scope_info(shared->scope_info());
-
-  bool default_result = false;
-
-  // Parameters.
-  for (int i = 0; i < scope_info->ParameterCount(); ++i) {
-    HandleScope scope(isolate_);
-    if (String::Equals(handle(scope_info->ParameterName(i)), variable_name)) {
-      frame->SetParameterValue(i, *new_value);
-      // Argument might be shadowed in heap context, don't stop here.
-      default_result = true;
-    }
-  }
-
-  // Stack locals.
-  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
-    HandleScope scope(isolate_);
-    if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
-      frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
-      return true;
-    }
-  }
-
-  if (scope_info->HasContext()) {
-    // Context locals.
-    Handle<Context> frame_context(Context::cast(frame->context()));
-    Handle<Context> function_context(frame_context->declaration_context());
-    if (SetContextLocalValue(scope_info, function_context, variable_name,
-                             new_value)) {
-      return true;
-    }
-
-    // Function context extension. These are variables introduced by eval.
-    if (function_context->closure() == *function) {
-      if (function_context->has_extension() &&
-          !function_context->IsNativeContext()) {
-        Handle<JSObject> ext(function_context->extension_object());
-
-        Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
-        DCHECK(maybe.IsJust());
-        if (maybe.FromJust()) {
-          // We don't expect this to do anything except replacing
-          // property value.
-          Runtime::SetObjectProperty(isolate_, ext, variable_name, new_value,
-                                     SLOPPY)
-              .Assert();
-          return true;
-        }
-      }
-    }
-  }
-
-  return default_result;
-}
-
-
-bool ScopeIterator::SetBlockVariableValue(Handle<String> variable_name,
-                                          Handle<Object> new_value) {
-  Handle<ScopeInfo> scope_info = CurrentScopeInfo();
-  JavaScriptFrame* frame = GetFrame();
-
-  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
-    HandleScope scope(isolate_);
-    if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
-      frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
-      return true;
-    }
-  }
-
-  if (HasContext()) {
-    Handle<Context> context = CurrentContext();
-    if (SetContextLocalValue(scope_info, context, variable_name, new_value)) {
-      return true;
-    }
-
-    Handle<JSObject> ext(context->extension_object(), isolate_);
-    if (!ext.is_null()) {
-      Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
-      DCHECK(maybe.IsJust());
-      if (maybe.FromJust()) {
-        // We don't expect this to do anything except replacing property value.
-        JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
-                                                 NONE)
-            .Check();
-        return true;
-      }
-    }
-  }
-
-  return false;
-}
-
-
-// This method copies structure of MaterializeClosure method above.
-bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
-                                            Handle<Object> new_value) {
-  Handle<Context> context = CurrentContext();
-  DCHECK(context->IsFunctionContext());
-
-  // Context locals to the context extension.
-  Handle<SharedFunctionInfo> shared(context->closure()->shared());
-  Handle<ScopeInfo> scope_info(shared->scope_info());
-  if (SetContextLocalValue(scope_info, context, variable_name, new_value)) {
-    return true;
-  }
-
-  // Properties from the function context extension. This will
-  // be variables introduced by eval.
   if (context->has_extension()) {
-    Handle<JSObject> ext(JSObject::cast(context->extension_object()));
+    Handle<JSObject> ext(context->extension_object());
     Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
     DCHECK(maybe.IsJust());
     if (maybe.FromJust()) {
@@ -762,6 +661,55 @@
   return false;
 }
 
+bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
+                                          Handle<Object> new_value) {
+  JavaScriptFrame* frame = GetFrame();
+  Handle<ScopeInfo> scope_info(frame->function()->shared()->scope_info());
+
+  // Parameter might be shadowed in context. Don't stop here.
+  bool result = SetParameterValue(scope_info, frame, variable_name, new_value);
+
+  // Stack locals.
+  if (SetStackVariableValue(scope_info, frame, variable_name, new_value)) {
+    return true;
+  }
+
+  if (scope_info->HasContext() &&
+      SetContextVariableValue(scope_info, CurrentContext(), variable_name,
+                              new_value)) {
+    return true;
+  }
+
+  return result;
+}
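
Why SetParameterValue's result is carried in `result` rather than returned immediately: a captured parameter is context-allocated, so both the stack slot and the context slot must be updated. A minimal sketch of the shadowing case (function names are illustrative only):

// Hedged sketch: `x` is captured by the closure, so it lives in the
// function context as well as in the stack frame's parameter slot.
function outer(x) {
  var read = function() { return x; };  // forces x into the context
  debugger;  // assigning to x here must hit the context slot,
             // not only the (now shadowed) stack parameter
  return read();
}
outer(1);
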
+
+bool ScopeIterator::SetInnerScopeVariableValue(Handle<String> variable_name,
+                                               Handle<Object> new_value) {
+  Handle<ScopeInfo> scope_info = CurrentScopeInfo();
+  DCHECK(scope_info->scope_type() == BLOCK_SCOPE ||
+         scope_info->scope_type() == EVAL_SCOPE);
+  JavaScriptFrame* frame = GetFrame();
+
+  // Setting stack locals of optimized frames is not supported.
+  if (SetStackVariableValue(scope_info, frame, variable_name, new_value)) {
+    return true;
+  }
+
+  if (HasContext() && SetContextVariableValue(scope_info, CurrentContext(),
+                                              variable_name, new_value)) {
+    return true;
+  }
+
+  return false;
+}
+
+// This method copies the structure of the MaterializeClosure method above.
+bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
+                                            Handle<Object> new_value) {
+  DCHECK(CurrentContext()->IsFunctionContext());
+  return SetContextVariableValue(CurrentScopeInfo(), CurrentContext(),
+                                 variable_name, new_value);
+}
 
 bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
                                            Handle<Object> new_value) {
@@ -780,7 +728,6 @@
   return false;
 }
 
-
 bool ScopeIterator::SetCatchVariableValue(Handle<String> variable_name,
                                           Handle<Object> new_value) {
   Handle<Context> context = CurrentContext();
@@ -801,48 +748,47 @@
   int local_count = scope_info->ContextLocalCount();
   if (local_count == 0) return;
   // Fill all context locals to the context extension.
-  int first_context_var = scope_info->StackLocalCount();
-  int start = scope_info->ContextLocalNameEntriesIndex();
   for (int i = 0; i < local_count; ++i) {
-    if (scope_info->LocalIsSynthetic(first_context_var + i)) continue;
+    Handle<String> name(scope_info->ContextLocalName(i));
+    if (ScopeInfo::VariableIsSynthetic(*name)) continue;
     int context_index = Context::MIN_CONTEXT_SLOTS + i;
     Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
     // Reflect variables under TDZ as undefined in scope object.
     if (value->IsTheHole()) continue;
     // This should always succeed.
     // TODO(verwaest): Use AddDataProperty instead.
-    JSObject::SetOwnPropertyIgnoreAttributes(
-        scope_object, handle(String::cast(scope_info->get(i + start))), value,
-        NONE)
+    JSObject::SetOwnPropertyIgnoreAttributes(scope_object, name, value, NONE)
         .Check();
   }
 }
 
-bool ScopeIterator::CopyContextExtensionToScopeObject(
-    Handle<JSObject> extension, Handle<JSObject> scope_object,
+void ScopeIterator::CopyContextExtensionToScopeObject(
+    Handle<Context> context, Handle<JSObject> scope_object,
     KeyCollectionType type) {
-  Handle<FixedArray> keys;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate_, keys, JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS),
-      false);
+  if (context->extension_object() == nullptr) return;
+  Handle<JSObject> extension(context->extension_object());
+  Handle<FixedArray> keys =
+      JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS)
+          .ToHandleChecked();
 
   for (int i = 0; i < keys->length(); i++) {
     // Names of variables introduced by eval are strings.
     DCHECK(keys->get(i)->IsString());
     Handle<String> key(String::cast(keys->get(i)));
-    Handle<Object> value;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate_, value, Object::GetPropertyOrElement(extension, key), false);
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate_, JSObject::SetOwnPropertyIgnoreAttributes(
-            scope_object, key, value, NONE), false);
+    Handle<Object> value =
+        Object::GetPropertyOrElement(extension, key).ToHandleChecked();
+    JSObject::SetOwnPropertyIgnoreAttributes(scope_object, key, value, NONE)
+        .Check();
   }
-  return true;
 }
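
For reference, the kind of variable that ends up on a context extension object, and hence flows through CopyContextExtensionToScopeObject; a hedged sketch:

// Hedged example: a sloppy-mode eval declares `fromEval` dynamically, so
// it is stored on the context's extension object rather than in a fixed
// context slot, and is copied onto the materialized scope from there.
function host() {
  eval("var fromEval = 7;");  // lands on the context extension
  debugger;                   // the scope mirror should now list fromEval
}
host();
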
 
 void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
                                         int position) {
-  if (!scope->is_eval_scope()) {
+  if (scope->is_hidden()) {
+    // We need to add this chain element in case the scope has an associated
+    // context, since the scope chain and context chain must stay in sync.
+    nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate)));
+  } else {
     nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate),
                                               scope->start_position(),
                                               scope->end_position()));
@@ -851,7 +797,7 @@
     Scope* inner_scope = scope->inner_scopes()->at(i);
     int beg_pos = inner_scope->start_position();
     int end_pos = inner_scope->end_position();
-    DCHECK(beg_pos >= 0 && end_pos >= 0);
+    DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden());
     if (beg_pos <= position && position < end_pos) {
       GetNestedScopeChain(isolate, inner_scope, position);
       return;
diff --git a/src/debug/debug-scopes.h b/src/debug/debug-scopes.h
index 4e95fc4..9560227 100644
--- a/src/debug/debug-scopes.h
+++ b/src/debug/debug-scopes.h
@@ -25,6 +25,7 @@
     ScopeTypeCatch,
     ScopeTypeBlock,
     ScopeTypeScript,
+    ScopeTypeEval,
     ScopeTypeModule
   };
 
@@ -85,9 +86,12 @@
   struct ExtendedScopeInfo {
     ExtendedScopeInfo(Handle<ScopeInfo> info, int start, int end)
         : scope_info(info), start_position(start), end_position(end) {}
+    explicit ExtendedScopeInfo(Handle<ScopeInfo> info)
+        : scope_info(info), start_position(-1), end_position(-1) {}
     Handle<ScopeInfo> scope_info;
     int start_position;
     int end_position;
+    bool is_hidden() { return start_position == -1 && end_position == -1; }
   };
 
   Isolate* isolate_;
@@ -117,28 +121,37 @@
   MUST_USE_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
   Handle<JSObject> MaterializeClosure();
   Handle<JSObject> MaterializeCatchScope();
-  Handle<JSObject> MaterializeBlockScope();
+  Handle<JSObject> MaterializeInnerScope();
   Handle<JSObject> WithContextExtension();
 
   bool SetLocalVariableValue(Handle<String> variable_name,
                              Handle<Object> new_value);
-  bool SetBlockVariableValue(Handle<String> variable_name,
-                             Handle<Object> new_value);
+  bool SetInnerScopeVariableValue(Handle<String> variable_name,
+                                  Handle<Object> new_value);
   bool SetClosureVariableValue(Handle<String> variable_name,
                                Handle<Object> new_value);
   bool SetScriptVariableValue(Handle<String> variable_name,
                               Handle<Object> new_value);
   bool SetCatchVariableValue(Handle<String> variable_name,
                              Handle<Object> new_value);
-  bool SetContextLocalValue(Handle<ScopeInfo> scope_info,
-                            Handle<Context> context,
-                            Handle<String> variable_name,
-                            Handle<Object> new_value);
+
+  // Helper functions.
+  bool SetParameterValue(Handle<ScopeInfo> scope_info, JavaScriptFrame* frame,
+                         Handle<String> parameter_name,
+                         Handle<Object> new_value);
+  bool SetStackVariableValue(Handle<ScopeInfo> scope_info,
+                             JavaScriptFrame* frame,
+                             Handle<String> variable_name,
+                             Handle<Object> new_value);
+  bool SetContextVariableValue(Handle<ScopeInfo> scope_info,
+                               Handle<Context> context,
+                               Handle<String> variable_name,
+                               Handle<Object> new_value);
 
   void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
                                       Handle<Context> context,
                                       Handle<JSObject> scope_object);
-  bool CopyContextExtensionToScopeObject(Handle<JSObject> extension,
+  void CopyContextExtensionToScopeObject(Handle<Context> context,
                                          Handle<JSObject> scope_object,
                                          KeyCollectionType type);
 
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index 6e94012..3b5fb5f 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -260,12 +260,6 @@
   return it->GetBreakLocation();
 }
 
-FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
-  List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-  frame->Summarize(&frames);
-  return frames.first();
-}
-
 int CallOffsetFromCodeOffset(int code_offset, bool is_interpreted) {
   // Code offset points to the instruction after the call. Subtract 1 to
   // exclude that instruction from the search. For bytecode, the code offset
@@ -275,7 +269,7 @@
 
 BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
                                        JavaScriptFrame* frame) {
-  FrameSummary summary = GetFirstFrameSummary(frame);
+  FrameSummary summary = FrameSummary::GetFirst(frame);
   int call_offset =
       CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
   return FromCodeOffset(debug_info, call_offset);
@@ -631,7 +625,7 @@
       step_break = location.IsTailCall();
     // Fall through.
     case StepIn: {
-      FrameSummary summary = GetFirstFrameSummary(frame);
+      FrameSummary summary = FrameSummary::GetFirst(frame);
       int offset = summary.code_offset();
       step_break = step_break || location.IsReturn() ||
                    (current_fp != last_fp) ||
@@ -962,6 +956,14 @@
     it.Advance();
   }
 
+  if (last_step_action() == StepNext) {
+    while (!it.done()) {
+      Address current_fp = it.frame()->UnpaddedFP();
+      if (current_fp >= thread_local_.target_fp_) break;
+      it.Advance();
+    }
+  }
+
   // Find the closest Javascript frame we can flood with one-shots.
   while (!it.done() &&
          !it.frame()->function()->shared()->IsSubjectToDebugging()) {
@@ -1011,7 +1013,7 @@
   }
 
   // Get the debug info (create it if it does not exist).
-  FrameSummary summary = GetFirstFrameSummary(frame);
+  FrameSummary summary = FrameSummary::GetFirst(frame);
   Handle<JSFunction> function(summary.function());
   Handle<SharedFunctionInfo> shared(function->shared());
   if (!EnsureDebugInfo(shared, function)) {
@@ -1022,7 +1024,7 @@
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
   // Refresh frame summary if the code has been recompiled for debugging.
   if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
-    summary = GetFirstFrameSummary(frame);
+    summary = FrameSummary::GetFirst(frame);
   }
 
   int call_offset =
@@ -1604,7 +1606,7 @@
   if (!shared->HasDebugInfo()) return false;
 
   DCHECK(!frame->is_optimized());
-  FrameSummary summary = GetFirstFrameSummary(frame);
+  FrameSummary summary = FrameSummary::GetFirst(frame);
 
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
   BreakLocation location =
@@ -1656,21 +1658,6 @@
 }
 
 
-void Debug::RecordEvalCaller(Handle<Script> script) {
-  script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
-  // For eval scripts add information on the function from which eval was
-  // called.
-  StackTraceFrameIterator it(script->GetIsolate());
-  if (!it.done()) {
-    script->set_eval_from_shared(it.frame()->function()->shared());
-    Code* code = it.frame()->LookupCode();
-    int offset = static_cast<int>(
-        it.frame()->pc() - code->instruction_start());
-    script->set_eval_from_instructions_offset(offset);
-  }
-}
-
-
 MaybeHandle<Object> Debug::MakeExecutionState() {
   // Create the execution state object.
   Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) };
@@ -1911,7 +1898,7 @@
                               exec_state,
                               event_data,
                               event_listener_data_ };
-    Handle<JSReceiver> global(isolate_->global_proxy());
+    Handle<JSReceiver> global = isolate_->global_proxy();
     Execution::TryCall(isolate_, Handle<JSFunction>::cast(event_listener_),
                        global, arraysize(argv), argv);
   }
@@ -2260,7 +2247,7 @@
   JavaScriptFrameIterator iterator(isolate_);
   if (iterator.done()) return;
   JavaScriptFrame* frame = iterator.frame();
-  FrameSummary summary = GetFirstFrameSummary(frame);
+  FrameSummary summary = FrameSummary::GetFirst(frame);
   int source_position =
       summary.abstract_code()->SourcePosition(summary.code_offset());
   Handle<Object> script_obj(summary.function()->shared()->script(), isolate_);
@@ -2271,8 +2258,10 @@
     Handle<Script> script = Handle<Script>::cast(script_obj);
     Handle<String> source(String::cast(script->source()));
     Script::InitLineEnds(script);
-    int line = Script::GetLineNumber(script, source_position);
-    int column = Script::GetColumnNumber(script, source_position);
+    int line =
+        Script::GetLineNumber(script, source_position) - script->line_offset();
+    int column = Script::GetColumnNumber(script, source_position) -
+                 (line == 0 ? script->column_offset() : 0);
     Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
     int line_start =
         line == 0 ? 0 : Smi::cast(line_ends->get(line - 1))->value() + 1;
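
The offset arithmetic above deserves a worked example. A sketch, assuming a script embedded at line 3, column 10 of its resource (values are illustrative):

// Hedged model of the adjustment: the column offset only applies on the
// script's first line, since later lines start at column 0 of the script.
var script = { line_offset: 3, column_offset: 10 };
function toScriptRelative(absLine, absColumn) {
  var line = absLine - script.line_offset;
  var column = absColumn - (line === 0 ? script.column_offset : 0);
  return { line: line, column: column };
}
console.log(toScriptRelative(3, 25));  // { line: 0, column: 15 }
console.log(toScriptRelative(5, 4));   // { line: 2, column: 4 }
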
diff --git a/src/debug/debug.h b/src/debug/debug.h
index 501de63..2cdc151 100644
--- a/src/debug/debug.h
+++ b/src/debug/debug.h
@@ -498,9 +498,6 @@
   static int ArchiveSpacePerThread();
   void FreeThreadResources() { }
 
-  // Record function from which eval was called.
-  static void RecordEvalCaller(Handle<Script> script);
-
   bool CheckExecutionState(int id) {
     return is_active() && !debug_context().is_null() && break_id() != 0 &&
            break_id() == id;
diff --git a/src/debug/debug.js b/src/debug/debug.js
index 7f06ca1..38934b0 100644
--- a/src/debug/debug.js
+++ b/src/debug/debug.js
@@ -894,10 +894,6 @@
   return %GetFrameCount(this.break_id);
 };
 
-ExecutionState.prototype.threadCount = function() {
-  return %GetThreadCount(this.break_id);
-};
-
 ExecutionState.prototype.frame = function(opt_index) {
   // If no index supplied return the selected frame.
   if (opt_index == null) opt_index = this.selected_frame;
@@ -2173,28 +2169,6 @@
 };
 
 
-DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
-  // Get the number of threads.
-  var total_threads = this.exec_state_.threadCount();
-
-  // Get information for all threads.
-  var threads = [];
-  for (var i = 0; i < total_threads; i++) {
-    var details = %GetThreadDetails(this.exec_state_.break_id, i);
-    var thread_info = { current: details[0],
-                        id: details[1]
-                      };
-    threads.push(thread_info);
-  }
-
-  // Create the response body.
-  response.body = {
-    totalThreads: total_threads,
-    threads: threads
-  };
-};
-
-
 DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
   response.running = false;
 };
@@ -2360,7 +2334,6 @@
     "references":           proto.referencesRequest_,
     "source":               proto.sourceRequest_,
     "scripts":              proto.scriptsRequest_,
-    "threads":              proto.threadsRequest_,
     "suspend":              proto.suspendRequest_,
     "version":              proto.versionRequest_,
     "changelive":           proto.changeLiveRequest_,
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index 78ed6f1..50d60a1 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -13,6 +13,7 @@
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/global-handles.h"
+#include "src/interpreter/source-position-table.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/parsing/parser.h"
@@ -623,6 +624,8 @@
 
 void FunctionInfoWrapper::SetFunctionCode(Handle<Code> function_code,
                                           Handle<HeapObject> code_scope_info) {
+  // CompileForLiveEdit must deliver full-codegen code.
+  DCHECK(function_code->kind() == Code::FUNCTION);
   Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
   this->SetField(kCodeOffset_, code_wrapper);
 
@@ -688,115 +691,6 @@
 }
 
 
-class FunctionInfoListener {
- public:
-  explicit FunctionInfoListener(Isolate* isolate) {
-    current_parent_index_ = -1;
-    len_ = 0;
-    result_ = isolate->factory()->NewJSArray(10);
-  }
-
-  void FunctionStarted(FunctionLiteral* fun) {
-    HandleScope scope(isolate());
-    FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate());
-    info.SetInitialProperties(fun->name(), fun->start_position(),
-                              fun->end_position(), fun->parameter_count(),
-                              fun->materialized_literal_count(),
-                              current_parent_index_);
-    current_parent_index_ = len_;
-    SetElementSloppy(result_, len_, info.GetJSArray());
-    len_++;
-  }
-
-  void FunctionDone() {
-    HandleScope scope(isolate());
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(
-        *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
-             .ToHandleChecked());
-    current_parent_index_ = info.GetParentIndex();
-  }
-
-  // Saves only function code, because for a script function we
-  // may never create a SharedFunctionInfo object.
-  void FunctionCode(Handle<Code> function_code) {
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(
-        *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
-             .ToHandleChecked());
-    info.SetFunctionCode(function_code,
-                         Handle<HeapObject>(isolate()->heap()->null_value()));
-  }
-
-  // Saves full information about a function: its code, its scope info
-  // and a SharedFunctionInfo object.
-  void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope,
-                    Zone* zone) {
-    if (!shared->IsSharedFunctionInfo()) {
-      return;
-    }
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(
-        *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
-             .ToHandleChecked());
-    info.SetFunctionCode(Handle<Code>(shared->code()),
-                         Handle<HeapObject>(shared->scope_info()));
-    info.SetSharedFunctionInfo(shared);
-
-    Handle<Object> scope_info_list = SerializeFunctionScope(scope, zone);
-    info.SetFunctionScopeInfo(scope_info_list);
-  }
-
-  Handle<JSArray> GetResult() { return result_; }
-
- private:
-  Isolate* isolate() const { return result_->GetIsolate(); }
-
-  Handle<Object> SerializeFunctionScope(Scope* scope, Zone* zone) {
-    Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10);
-    int scope_info_length = 0;
-
-    // Saves some description of scope. It stores name and indexes of
-    // variables in the whole scope chain. Null-named slots delimit
-    // scopes of this chain.
-    Scope* current_scope = scope;
-    while (current_scope != NULL) {
-      HandleScope handle_scope(isolate());
-      ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
-      ZoneList<Variable*> context_list(
-          current_scope->ContextLocalCount(), zone);
-      ZoneList<Variable*> globals_list(current_scope->ContextGlobalCount(),
-                                       zone);
-      current_scope->CollectStackAndContextLocals(&stack_list, &context_list,
-                                                  &globals_list);
-      context_list.Sort(&Variable::CompareIndex);
-
-      for (int i = 0; i < context_list.length(); i++) {
-        SetElementSloppy(scope_info_list,
-                         scope_info_length,
-                         context_list[i]->name());
-        scope_info_length++;
-        SetElementSloppy(
-            scope_info_list,
-            scope_info_length,
-            Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
-        scope_info_length++;
-      }
-      SetElementSloppy(scope_info_list,
-                       scope_info_length,
-                       Handle<Object>(isolate()->heap()->null_value(),
-                                      isolate()));
-      scope_info_length++;
-
-      current_scope = current_scope->outer_scope();
-    }
-
-    return scope_info_list;
-  }
-
-  Handle<JSArray> result_;
-  int len_;
-  int current_parent_index_;
-};
-
-
 void LiveEdit::InitializeThreadLocal(Debug* debug) {
   debug->thread_local_.frame_drop_mode_ = LiveEdit::FRAMES_UNTOUCHED;
 }
@@ -832,11 +726,10 @@
                                                  Handle<String> source) {
   Isolate* isolate = script->GetIsolate();
 
-  FunctionInfoListener listener(isolate);
+  MaybeHandle<JSArray> infos;
   Handle<Object> original_source =
       Handle<Object>(script->source(), isolate);
   script->set_source(*source);
-  isolate->set_active_function_info_listener(&listener);
 
   {
     // Creating verbose TryCatch from public API is currently the only way to
@@ -845,7 +738,7 @@
     try_catch.SetVerbose(true);
 
     // A logical 'try' section.
-    Compiler::CompileForLiveEdit(script);
+    infos = Compiler::CompileForLiveEdit(script);
   }
 
   // A logical 'catch' section.
@@ -883,11 +776,10 @@
   }
 
   // A logical 'finally' section.
-  isolate->set_active_function_info_listener(NULL);
   script->set_source(*original_source);
 
   if (rethrow_exception.is_null()) {
-    return listener.GetResult();
+    return infos.ToHandleChecked();
   } else {
     return isolate->Throw<JSArray>(rethrow_exception);
   }
@@ -1116,9 +1008,23 @@
 
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
-  if (shared_info->code()->kind() == Code::FUNCTION) {
-    Handle<Code> code = compile_info_wrapper.GetFunctionCode();
-    ReplaceCodeObject(Handle<Code>(shared_info->code()), code);
+  if (shared_info->is_compiled()) {
+    Handle<Code> new_code = compile_info_wrapper.GetFunctionCode();
+    Handle<Code> old_code(shared_info->code());
+    if (shared_info->HasBytecodeArray()) {
+      // The old code is interpreted. If we clear the bytecode array, the
+      // interpreter entry trampoline will self-heal and go to compiled code.
+      shared_info->ClearBytecodeArray();
+      shared_info->ReplaceCode(*new_code);
+    } else {
+      DCHECK(old_code->kind() == Code::FUNCTION);
+      ReplaceCodeObject(old_code, new_code);
+    }
+    if (shared_info->HasDebugInfo()) {
+      // Existing break points will be re-applied. Reset the debug info here.
+      isolate->debug()->RemoveDebugInfoAndClearFromShared(
+          handle(shared_info->GetDebugInfo()));
+    }
     Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
     if (code_scope_info->IsFixedArray()) {
       shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
@@ -1282,12 +1188,11 @@
   static const int kMaximalBufferSize = 512*MB;
 };
 
-
+namespace {
 // Patch positions in code (changes relocation info section) and possibly
 // returns new instance of code.
-static Handle<Code> PatchPositionsInCode(
-    Handle<Code> code,
-    Handle<JSArray> position_change_array) {
+Handle<Code> PatchPositionsInCode(Handle<Code> code,
+                                  Handle<JSArray> position_change_array) {
   Isolate* isolate = code->GetIsolate();
 
   RelocInfoBuffer buffer_writer(code->relocation_size(),
@@ -1328,6 +1233,24 @@
   }
 }
 
+void PatchPositionsInBytecodeArray(Handle<BytecodeArray> bytecode,
+                                   Handle<JSArray> position_change_array) {
+  Isolate* isolate = bytecode->GetIsolate();
+  Zone zone(isolate->allocator());
+  interpreter::SourcePositionTableBuilder builder(isolate, &zone);
+
+  for (interpreter::SourcePositionTableIterator iterator(
+           bytecode->source_position_table());
+       !iterator.done(); iterator.Advance()) {
+    int position = iterator.source_position();
+    int new_position = TranslatePosition(position, position_change_array);
+    builder.AddPosition(iterator.bytecode_offset(), new_position,
+                        iterator.is_statement());
+  }
+
+  bytecode->set_source_position_table(*builder.ToSourcePositionTable());
+}
+}  // namespace
 
 void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
                                       Handle<JSArray> position_change_array) {
@@ -1358,6 +1281,9 @@
       // untouched).
       ReplaceCodeObject(Handle<Code>(info->code()), patched_code);
     }
+  } else if (info->HasBytecodeArray()) {
+    PatchPositionsInBytecodeArray(Handle<BytecodeArray>(info->bytecode_array()),
+                                  position_change_array);
   }
 }
 
@@ -1374,8 +1300,7 @@
   copy->set_type(original->type());
   copy->set_context_data(original->context_data());
   copy->set_eval_from_shared(original->eval_from_shared());
-  copy->set_eval_from_instructions_offset(
-      original->eval_from_instructions_offset());
+  copy->set_eval_from_position(original->eval_from_position());
 
   // Copy all the flags, but clear compilation state.
   copy->set_flags(original->flags());
@@ -1555,6 +1480,13 @@
     top_frame = frames[top_frame_index - 2];
     *mode = LiveEdit::CURRENTLY_SET_MODE;
     frame_has_padding = false;
+  } else if (pre_top_frame_code->kind() == Code::BYTECODE_HANDLER) {
+    // Interpreted bytecode takes up two stack frames, one for the bytecode
+    // handler and one for the interpreter entry trampoline. Therefore we shift
+    // up by one frame.
+    *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
+    pre_top_frame = frames[top_frame_index - 2];
+    top_frame = frames[top_frame_index - 1];
   } else {
     return "Unknown structure of stack above changing function";
   }
@@ -1792,7 +1724,8 @@
   // Adjust break_frame after some frames have been dropped.
   StackFrame::Id new_id = StackFrame::NO_ID;
   for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
-    if (frames[i]->type() == StackFrame::JAVA_SCRIPT) {
+    if (frames[i]->type() == StackFrame::JAVA_SCRIPT ||
+        frames[i]->type() == StackFrame::INTERPRETED) {
       new_id = frames[i]->id();
       break;
     }
@@ -2004,40 +1937,107 @@
   return NULL;
 }
 
+Handle<JSArray> LiveEditFunctionTracker::Collect(FunctionLiteral* node,
+                                                 Handle<Script> script,
+                                                 Zone* zone, Isolate* isolate) {
+  LiveEditFunctionTracker visitor(script, zone, isolate);
+  visitor.VisitFunctionLiteral(node);
+  return visitor.result_;
+}
 
-LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
-                                                 FunctionLiteral* fun)
-    : isolate_(isolate) {
-  if (isolate_->active_function_info_listener() != NULL) {
-    isolate_->active_function_info_listener()->FunctionStarted(fun);
+LiveEditFunctionTracker::LiveEditFunctionTracker(Handle<Script> script,
+                                                 Zone* zone, Isolate* isolate)
+    : AstTraversalVisitor(isolate) {
+  current_parent_index_ = -1;
+  isolate_ = isolate;
+  len_ = 0;
+  result_ = isolate->factory()->NewJSArray(10);
+  script_ = script;
+  zone_ = zone;
+}
+
+void LiveEditFunctionTracker::VisitFunctionLiteral(FunctionLiteral* node) {
+  Scope* scope = node->scope();
+
+  // FunctionStarted is called in pre-order.
+  FunctionStarted(node);
+
+  VisitDeclarations(scope->declarations());
+  VisitStatements(node->body());
+
+  // FunctionDone is called in post-order.
+  // TODO(jgruber): If required, replace the (linear cost)
+  // FindSharedFunctionInfo call with a more efficient implementation.
+  Handle<SharedFunctionInfo> info =
+      script_->FindSharedFunctionInfo(node).ToHandleChecked();
+  FunctionDone(info, scope);
+}
+
+void LiveEditFunctionTracker::FunctionStarted(FunctionLiteral* fun) {
+  HandleScope handle_scope(isolate_);
+  FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate_);
+  info.SetInitialProperties(fun->name(), fun->start_position(),
+                            fun->end_position(), fun->parameter_count(),
+                            fun->materialized_literal_count(),
+                            current_parent_index_);
+  current_parent_index_ = len_;
+  SetElementSloppy(result_, len_, info.GetJSArray());
+  len_++;
+}
+
+// Saves full information about a function: its code, its scope info
+// and a SharedFunctionInfo object.
+void LiveEditFunctionTracker::FunctionDone(Handle<SharedFunctionInfo> shared,
+                                           Scope* scope) {
+  HandleScope handle_scope(isolate_);
+  FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+      *JSReceiver::GetElement(isolate_, result_, current_parent_index_)
+           .ToHandleChecked());
+  info.SetFunctionCode(Handle<Code>(shared->code()),
+                       Handle<HeapObject>(shared->scope_info()));
+  info.SetSharedFunctionInfo(shared);
+
+  Handle<Object> scope_info_list = SerializeFunctionScope(scope);
+  info.SetFunctionScopeInfo(scope_info_list);
+
+  current_parent_index_ = info.GetParentIndex();
+}
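
The pre-/post-order bookkeeping around current_parent_index_ is the classic parent-pointer flattening of a tree; a hedged JavaScript sketch of the same scheme:

// Each function literal records the index of its enclosing function;
// "FunctionDone" restores the parent index on the way back up.
function collect(root) {
  var result = [];
  var currentParent = -1;
  (function visit(node) {
    var myIndex = result.length;                               // FunctionStarted
    result.push({ name: node.name, parent: currentParent });
    currentParent = myIndex;
    (node.children || []).forEach(visit);
    currentParent = result[myIndex].parent;                    // FunctionDone
  })(root);
  return result;
}
console.log(collect({ name: "top", children: [{ name: "inner" }] }));
// [ { name: 'top', parent: -1 }, { name: 'inner', parent: 0 } ]
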
+
+Handle<Object> LiveEditFunctionTracker::SerializeFunctionScope(Scope* scope) {
+  Handle<JSArray> scope_info_list = isolate_->factory()->NewJSArray(10);
+  int scope_info_length = 0;
+
+  // Saves a description of the scope. It stores the names and indexes of
+  // variables in the whole scope chain. Null-named slots delimit the
+  // scopes of this chain.
+  Scope* current_scope = scope;
+  while (current_scope != NULL) {
+    HandleScope handle_scope(isolate_);
+    ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone_);
+    ZoneList<Variable*> context_list(current_scope->ContextLocalCount(), zone_);
+    ZoneList<Variable*> globals_list(current_scope->ContextGlobalCount(),
+                                     zone_);
+    current_scope->CollectStackAndContextLocals(&stack_list, &context_list,
+                                                &globals_list);
+    context_list.Sort(&Variable::CompareIndex);
+
+    for (int i = 0; i < context_list.length(); i++) {
+      SetElementSloppy(scope_info_list, scope_info_length,
+                       context_list[i]->name());
+      scope_info_length++;
+      SetElementSloppy(
+          scope_info_list, scope_info_length,
+          Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate_));
+      scope_info_length++;
+    }
+    SetElementSloppy(scope_info_list, scope_info_length,
+                     Handle<Object>(isolate_->heap()->null_value(), isolate_));
+    scope_info_length++;
+
+    current_scope = current_scope->outer_scope();
   }
-}
 
-
-LiveEditFunctionTracker::~LiveEditFunctionTracker() {
-  if (isolate_->active_function_info_listener() != NULL) {
-    isolate_->active_function_info_listener()->FunctionDone();
-  }
-}
-
-
-void LiveEditFunctionTracker::RecordFunctionInfo(
-    Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
-    Zone* zone) {
-  if (isolate_->active_function_info_listener() != NULL) {
-    isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope(),
-                                                            zone);
-  }
-}
-
-
-void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
-  isolate_->active_function_info_listener()->FunctionCode(code);
-}
-
-
-bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
-  return isolate->active_function_info_listener() != NULL;
+  return scope_info_list;
 }
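
The comment above describes a flat serialization format; a hedged example of the resulting shape for a two-scope chain (names and indexes are made up):

// Inner scope with context locals a@2 and b@3, enclosed by a scope with
// x@2; null entries delimit the scopes of the chain.
var serialized = ["a", 2, "b", 3, null, "x", 2, null];
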
 
 }  // namespace internal
diff --git a/src/debug/liveedit.h b/src/debug/liveedit.h
index 67be70e..af74043 100644
--- a/src/debug/liveedit.h
+++ b/src/debug/liveedit.h
@@ -32,26 +32,39 @@
 namespace internal {
 
 // This class collects some specific information on structure of functions
-// in a particular script. It gets called from compiler all the time, but
-// actually records any data only when liveedit operation is in process;
-// in any other time this class is very cheap.
+// in a particular script.
 //
 // The primary interest of the Tracker is to record function scope structures
-// in order to analyze whether function code maybe safely patched (with new
+// in order to analyze whether function code may be safely patched (with new
 // code successfully reading existing data from function scopes). The Tracker
 // also collects compiled function codes.
-class LiveEditFunctionTracker {
+class LiveEditFunctionTracker : public AstTraversalVisitor {
  public:
-  explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
-  ~LiveEditFunctionTracker();
-  void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
-                          FunctionLiteral* lit, Zone* zone);
-  void RecordRootFunctionInfo(Handle<Code> code);
+  // Traverses the entire AST, and records information about all
+  // FunctionLiterals for further use by LiveEdit code patching. The collected
+  // information is returned as a serialized array.
+  static Handle<JSArray> Collect(FunctionLiteral* node, Handle<Script> script,
+                                 Zone* zone, Isolate* isolate);
 
-  static bool IsActive(Isolate* isolate);
+  virtual ~LiveEditFunctionTracker() {}
+  void VisitFunctionLiteral(FunctionLiteral* node) override;
 
  private:
+  LiveEditFunctionTracker(Handle<Script> script, Zone* zone, Isolate* isolate);
+
+  void FunctionStarted(FunctionLiteral* fun);
+  void FunctionDone(Handle<SharedFunctionInfo> shared, Scope* scope);
+  Handle<Object> SerializeFunctionScope(Scope* scope);
+
+  Handle<Script> script_;
+  Zone* zone_;
   Isolate* isolate_;
+
+  Handle<JSArray> result_;
+  int len_;
+  int current_parent_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveEditFunctionTracker);
 };
 
 
diff --git a/src/debug/mirrors.js b/src/debug/mirrors.js
index 881f303..d098c1c 100644
--- a/src/debug/mirrors.js
+++ b/src/debug/mirrors.js
@@ -16,8 +16,8 @@
 var MapEntries;
 var MapIteratorNext;
 var MathMin = global.Math.min;
-var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
-var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
+var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
 var SetIteratorNext;
 var SetValues;
 var SymbolToString;
@@ -115,7 +115,7 @@
 
 function ObjectIsPromise(value) {
   return IS_RECEIVER(value) &&
-         !IS_UNDEFINED(%DebugGetProperty(value, promiseStatusSymbol));
+         !IS_UNDEFINED(%DebugGetProperty(value, promiseStateSymbol));
 }
 
 
@@ -256,13 +256,15 @@
 // A copy of the scope types from runtime-debug.cc.
 // NOTE: these constants should be backward-compatible, so
 // add new ones to the end of this list.
-var ScopeType = { Global: 0,
-                  Local: 1,
-                  With: 2,
+var ScopeType = { Global:  0,
+                  Local:   1,
+                  With:    2,
                   Closure: 3,
-                  Catch: 4,
-                  Block: 5,
-                  Script: 6 };
+                  Catch:   4,
+                  Block:   5,
+                  Script:  6,
+                  Eval:    7,
+                };
 
 /**
  * Base class for all mirror objects.
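
The new Eval entry pairs with ScopeTypeEval on the C++ side. A hedged sketch of a frame where it would surface:

// Hedged example: a declaration inside strict-mode eval gets its own
// scope instead of leaking into the caller, and scope mirrors can now
// label that scope explicitly as Eval.
function caller() {
  eval('"use strict"; var fromEval = 42; debugger;');
}
// At the debugger statement, the scope chain for the paused frame would
// read roughly: [Eval, Local, Script, Global].
caller();
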
@@ -1272,7 +1274,7 @@
 
 
 function PromiseGetStatus_(value) {
-  var status = %DebugGetProperty(value, promiseStatusSymbol);
+  var status = %DebugGetProperty(value, promiseStateSymbol);
   if (status == 0) return "pending";
   if (status == 1) return "resolved";
   return "rejected";
@@ -1280,7 +1282,7 @@
 
 
 function PromiseGetValue_(value) {
-  return %DebugGetProperty(value, promiseValueSymbol);
+  return %DebugGetProperty(value, promiseResultSymbol);
 }
 
 
@@ -1408,8 +1410,8 @@
 
 function GeneratorGetStatus_(value) {
   var continuation = %GeneratorGetContinuation(value);
-  if (continuation < 0) return "running";
-  if (continuation == 0) return "closed";
+  if (continuation < -1) return "running";
+  if (continuation == -1) return "closed";
   return "suspended";
 }
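
The sentinel shift above (closed: 0 to -1, running: < 0 to < -1) presumably frees zero and positive values to act as real suspend offsets; a hedged sketch of the new decoding:

// Hedged model of the continuation encoding assumed by this change.
function statusFromContinuation(continuation) {
  if (continuation < -1) return "running";
  if (continuation === -1) return "closed";
  return "suspended";  // any offset >= 0 is a valid suspend point
}
console.log(statusFromContinuation(-1));  // "closed"
console.log(statusFromContinuation(0));   // "suspended", offset 0 is legal
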
 
@@ -1495,6 +1497,12 @@
 };
 
 
+PropertyMirror.prototype.toText = function() {
+  if (IS_SYMBOL(this.name_)) return %SymbolDescriptiveString(this.name_);
+  return this.name_;
+};
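
toText exists because symbol-named properties cannot be concatenated into a frame description as strings; a hedged sketch of the case it handles:

// Implicit string conversion of a Symbol throws in ES6, so frame text
// for a symbol-keyed method needs the descriptive string instead.
var obj = {};
obj[Symbol("tag")] = function() { throw new Error("boom"); };
// With toText(), a frame description can render the callee as something
// like: obj[Symbol(tag)]
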
+
+
 PropertyMirror.prototype.isIndexed = function() {
   for (var i = 0; i < this.name_.length; i++) {
     if (this.name_[i] < '0' || '9' < this.name_[i]) {
@@ -1529,11 +1537,6 @@
 };
 
 
-PropertyMirror.prototype.insertionIndex = function() {
-  return %DebugPropertyIndexFromDetails(this.details_);
-};
-
-
 /**
  * Returns whether this property has a getter defined through __defineGetter__.
   * @return {boolean} True if this property has a getter
@@ -2027,10 +2030,10 @@
         if (display_receiver) {
           result += '.';
         }
-        result += property.name();
+        result += property.toText();
       } else {
         result += '[';
-        result += property.name();
+        result += property.toText();
         result += ']';
       }
       // Also known as - if the name in the function doesn't match the name
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index b2c5d42..adf4cf1 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -47,7 +47,7 @@
 
 DeoptimizerData::~DeoptimizerData() {
   for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
-    allocator_->Free(deopt_entry_code_[i]);
+    allocator_->Free<MemoryAllocator::kFull>(deopt_entry_code_[i]);
     deopt_entry_code_[i] = NULL;
   }
 }
@@ -228,7 +228,7 @@
   Object* context = isolate->heap()->native_contexts_list();
   while (!context->IsUndefined()) {
     VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+    context = Context::cast(context)->next_context_link();
   }
 }
 
@@ -296,7 +296,9 @@
                          !FLAG_turbo_asm_deoptimization;
       bool safe_to_deopt =
           deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
-      CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
+      bool builtin = code->kind() == Code::BUILTIN;
+      CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned ||
+            builtin);
       if (topmost_optimized_code == NULL) {
         topmost_optimized_code = code;
         safe_to_deopt_topmost_optimized_code = safe_to_deopt;
@@ -372,6 +374,8 @@
 
 
 void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::DeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   if (FLAG_trace_deopt) {
@@ -385,12 +389,14 @@
     Context* native_context = Context::cast(context);
     MarkAllCodeForContext(native_context);
     DeoptimizeMarkedCodeForContext(native_context);
-    context = native_context->get(Context::NEXT_CONTEXT_LINK);
+    context = native_context->next_context_link();
   }
 }
 
 
 void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::DeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   if (FLAG_trace_deopt) {
@@ -403,7 +409,7 @@
   while (!context->IsUndefined()) {
     Context* native_context = Context::cast(context);
     DeoptimizeMarkedCodeForContext(native_context);
-    context = native_context->get(Context::NEXT_CONTEXT_LINK);
+    context = native_context->next_context_link();
   }
 }
 
@@ -420,7 +426,10 @@
 
 
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  TimerEventScope<TimerEventDeoptimizeCode> timer(function->GetIsolate());
+  Isolate* isolate = function->GetIsolate();
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::DeoptimizeCode);
+  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   Code* code = function->code();
   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
@@ -660,7 +669,7 @@
       length++;
       element = code->next_code_link();
     }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+    context = Context::cast(context)->next_context_link();
   }
   return length;
 }
@@ -839,9 +848,8 @@
            " @%d => node=%d, pc=0x%08" V8PRIxPTR ", caller sp=0x%08" V8PRIxPTR
            ", state=%s, took %0.3f ms]\n",
            bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
-           caller_frame_top_, FullCodeGenerator::State2String(
-                                  static_cast<FullCodeGenerator::State>(
-                                      output_[index]->GetState()->value())),
+           caller_frame_top_, BailoutStateToString(static_cast<BailoutState>(
+                                  output_[index]->GetState()->value())),
            ms);
   }
 }
@@ -1053,10 +1061,11 @@
 
   // If we are going to the catch handler, then the exception lives in
   // the accumulator.
-  FullCodeGenerator::State state =
-      goto_catch_handler ? FullCodeGenerator::TOS_REG
-                         : FullCodeGenerator::StateField::decode(pc_and_state);
-  output_frame->SetState(Smi::FromInt(state));
+  BailoutState state =
+      goto_catch_handler
+          ? BailoutState::TOS_REGISTER
+          : FullCodeGenerator::BailoutStateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(static_cast<int>(state)));
 
   // Set the continuation for the topmost frame.
   if (is_topmost) {
@@ -1272,7 +1281,9 @@
   Code* dispatch_builtin =
       builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
   output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
-  output_frame->SetState(0);
+  // Restore accumulator (TOS) register.
+  output_frame->SetState(
+      Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
 
   // Update constant pool.
   if (FLAG_enable_embedded_constant_pool) {
@@ -1288,14 +1299,11 @@
 
   // Set the continuation for the topmost frame.
   if (is_topmost) {
-    Code* continuation =
-        builtins->builtin(Builtins::kInterpreterNotifyDeoptimized);
+    Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
     if (bailout_type_ == LAZY) {
-      continuation =
-          builtins->builtin(Builtins::kInterpreterNotifyLazyDeoptimized);
+      continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
     } else if (bailout_type_ == SOFT) {
-      continuation =
-          builtins->builtin(Builtins::kInterpreterNotifySoftDeoptimized);
+      continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
     } else {
       CHECK_EQ(bailout_type_, EAGER);
     }
@@ -1509,7 +1517,7 @@
   // value of result register is preserved during continuation execution.
   // We do this here by "pushing" the result of the constructor function to the
   // top of the reconstructed stack and then using the
-  // FullCodeGenerator::TOS_REG machinery.
+  // BailoutState::TOS_REGISTER machinery.
   if (is_topmost) {
     height_in_bytes += kPointerSize;
   }
@@ -1630,7 +1638,8 @@
     DebugPrintOutputSlot(value, frame_index, output_offset,
                          "constructor result\n");
 
-    output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+    output_frame->SetState(
+        Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
   }
 
   CHECK_EQ(0u, output_offset);
@@ -1684,7 +1693,7 @@
   // value of result register is preserved during continuation execution.
   // We do this here by "pushing" the result of the accessor function to the
   // top of the reconstructed stack and then using the
-  // FullCodeGenerator::TOS_REG machinery.
+  // BailoutState::TOS_REGISTER machinery.
   // We don't need to restore the result in case of a setter call because we
   // have to return the stored value but not the result of the setter function.
   bool should_preserve_result = is_topmost && !is_setter_stub_frame;
@@ -1803,9 +1812,11 @@
     DebugPrintOutputSlot(value, frame_index, output_offset,
                          "accessor result\n");
 
-    output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+    output_frame->SetState(
+        Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
   } else {
-    output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+    output_frame->SetState(
+        Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
   }
 
   CHECK_EQ(0u, output_offset);
@@ -2060,7 +2071,8 @@
     output_frame->SetConstantPool(constant_pool_value);
     output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
   }
-  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetState(
+      Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
   Code* notify_failure =
       isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
   output_frame->SetContinuation(
@@ -2639,23 +2651,6 @@
   return it->GetValue();
 }
 
-int ComputeSourcePosition(Handle<SharedFunctionInfo> shared,
-                          BailoutId node_id) {
-  if (shared->HasBytecodeArray()) {
-    BytecodeArray* bytecodes = shared->bytecode_array();
-    // BailoutId points to the next bytecode in the bytecode aray. Subtract
-    // 1 to get the end of current bytecode.
-    return bytecodes->SourcePosition(node_id.ToInt() - 1);
-  } else {
-    Code* non_optimized_code = shared->code();
-    FixedArray* raw_data = non_optimized_code->deoptimization_data();
-    DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
-    unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, *shared);
-    unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
-    return non_optimized_code->SourcePosition(pc_offset);
-  }
-}
-
 }  // namespace
 
 DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
@@ -2685,8 +2680,8 @@
       parameter_frame != state->begin() &&
       (parameter_frame - 1)->kind() == TranslatedFrame::kConstructStub;
 
-  source_position_ =
-      ComputeSourcePosition(frame_it->shared_info(), frame_it->node_id());
+  source_position_ = Deoptimizer::ComputeSourcePosition(
+      *frame_it->shared_info(), frame_it->node_id());
 
   TranslatedFrame::iterator value_it = frame_it->begin();
   // Get the function. Note that this might materialize the function.
@@ -2750,24 +2745,48 @@
 Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
   SourcePosition last_position = SourcePosition::Unknown();
   Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
+  int last_deopt_id = Deoptimizer::DeoptInfo::kNoDeoptId;
   int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
+             RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
              RelocInfo::ModeMask(RelocInfo::POSITION);
   for (RelocIterator it(code, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
-    if (info->pc() >= pc) return DeoptInfo(last_position, NULL, last_reason);
+    if (info->pc() >= pc) {
+      return DeoptInfo(last_position, last_reason, last_deopt_id);
+    }
     if (info->rmode() == RelocInfo::POSITION) {
       int raw_position = static_cast<int>(info->data());
       last_position = raw_position ? SourcePosition::FromRaw(raw_position)
                                    : SourcePosition::Unknown();
+    } else if (info->rmode() == RelocInfo::DEOPT_ID) {
+      last_deopt_id = static_cast<int>(info->data());
     } else if (info->rmode() == RelocInfo::DEOPT_REASON) {
       last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
     }
   }
-  return DeoptInfo(SourcePosition::Unknown(), NULL, Deoptimizer::kNoReason);
+  return DeoptInfo(SourcePosition::Unknown(), Deoptimizer::kNoReason, -1);
 }
 
 
 // static
+int Deoptimizer::ComputeSourcePosition(SharedFunctionInfo* shared,
+                                       BailoutId node_id) {
+  if (shared->HasBytecodeArray()) {
+    BytecodeArray* bytecodes = shared->bytecode_array();
+    // BailoutId points to the next bytecode in the bytecode array. Subtract
+    // 1 to get the end of the current bytecode.
+    return bytecodes->SourcePosition(node_id.ToInt() - 1);
+  } else {
+    Code* non_optimized_code = shared->code();
+    FixedArray* raw_data = non_optimized_code->deoptimization_data();
+    DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+    unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, shared);
+    unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+    return non_optimized_code->SourcePosition(pc_offset);
+  }
+}
+
+// static
 TranslatedValue TranslatedValue::NewArgumentsObject(TranslatedState* container,
                                                     int length,
                                                     int object_index) {
@@ -3486,6 +3505,7 @@
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data =
       static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+  DCHECK(data != nullptr && deopt_index != Safepoint::kNoDeoptimizationIndex);
   TranslationIterator it(data->TranslationByteArray(),
                          data->TranslationIndex(deopt_index)->value());
   Init(frame->fp(), &it, data->LiteralArray(), nullptr /* registers */,
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 21ca84e..1d413e6 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -402,6 +402,22 @@
  public:
   enum BailoutType { EAGER, LAZY, SOFT, kLastBailoutType = SOFT };
 
+  enum class BailoutState {
+    NO_REGISTERS,
+    TOS_REGISTER,
+  };
+
+  static const char* BailoutStateToString(BailoutState state) {
+    switch (state) {
+      case BailoutState::NO_REGISTERS:
+        return "NO_REGISTERS";
+      case BailoutState::TOS_REGISTER:
+        return "TOS_REGISTER";
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
 #define DEOPT_MESSAGES_CONSTANTS(C, T) C,
   enum DeoptReason {
     DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_CONSTANTS) kLastDeoptReason
@@ -410,17 +426,21 @@
   static const char* GetDeoptReason(DeoptReason deopt_reason);
 
   struct DeoptInfo {
-    DeoptInfo(SourcePosition position, const char* m, DeoptReason d)
-        : position(position), mnemonic(m), deopt_reason(d), inlining_id(0) {}
+    DeoptInfo(SourcePosition position, DeoptReason deopt_reason, int deopt_id)
+        : position(position), deopt_reason(deopt_reason), deopt_id(deopt_id) {}
 
     SourcePosition position;
-    const char* mnemonic;
     DeoptReason deopt_reason;
-    int inlining_id;
+    int deopt_id;
+
+    static const int kNoDeoptId = -1;
   };
 
   static DeoptInfo GetDeoptInfo(Code* code, byte* from);
 
+  static int ComputeSourcePosition(SharedFunctionInfo* shared,
+                                   BailoutId node_id);
+
   struct JumpTableEntry : public ZoneObject {
     inline JumpTableEntry(Address entry, const DeoptInfo& deopt_info,
                           Deoptimizer::BailoutType type, bool frame)
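
With the mnemonic and inlining_id fields dropped, a DeoptInfo now carries only position, reason, and deopt id. A hedged construction example mirroring the fallback return in deoptimizer.cc above:

    Deoptimizer::DeoptInfo info(SourcePosition::Unknown(),
                                Deoptimizer::kNoReason,
                                Deoptimizer::DeoptInfo::kNoDeoptId);  // -1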
diff --git a/src/disassembler.cc b/src/disassembler.cc
index ed9ca9a..c29022a 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -109,10 +109,9 @@
           it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
         // raw pointer embedded in code stream, e.g., jump table
         byte* ptr = *reinterpret_cast<byte**>(pc);
-        SNPrintF(decode_buffer,
-                 "%08" V8PRIxPTR "      jump table entry %4" V8PRIdPTR,
-                 reinterpret_cast<intptr_t>(ptr),
-                 ptr - begin);
+        SNPrintF(
+            decode_buffer, "%08" V8PRIxPTR "      jump table entry %4" PRIuS,
+            reinterpret_cast<intptr_t>(ptr), static_cast<size_t>(ptr - begin));
         pc += sizeof(ptr);
       } else {
         decode_buffer[0] = '\0';
@@ -147,7 +146,7 @@
     }
 
     // Instruction address and instruction offset.
-    out.AddFormatted("%p  %4d  ", prev_pc, prev_pc - begin);
+    out.AddFormatted("%p  %4" V8PRIdPTRDIFF "  ", prev_pc, prev_pc - begin);
 
     // Instruction.
     out.AddFormatted("%s", decode_buffer.start());
@@ -171,15 +170,20 @@
       RelocInfo::Mode rmode = relocinfo.rmode();
       if (RelocInfo::IsPosition(rmode)) {
         if (RelocInfo::IsStatementPosition(rmode)) {
-          out.AddFormatted("    ;; debug: statement %d", relocinfo.data());
+          out.AddFormatted("    ;; debug: statement %" V8PRIdPTR,
+                           relocinfo.data());
         } else {
-          out.AddFormatted("    ;; debug: position %d", relocinfo.data());
+          out.AddFormatted("    ;; debug: position %" V8PRIdPTR,
+                           relocinfo.data());
         }
       } else if (rmode == RelocInfo::DEOPT_REASON) {
         Deoptimizer::DeoptReason reason =
             static_cast<Deoptimizer::DeoptReason>(relocinfo.data());
         out.AddFormatted("    ;; debug: deopt reason '%s'",
                          Deoptimizer::GetDeoptReason(reason));
+      } else if (rmode == RelocInfo::DEOPT_ID) {
+        out.AddFormatted("    ;; debug: deopt index %d",
+                         static_cast<int>(relocinfo.data()));
       } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
         HeapStringAllocator allocator;
         StringStream accumulator(&allocator);
@@ -203,10 +207,6 @@
           InlineCacheState ic_state = code->ic_state();
           out.AddFormatted(" %s, %s", Code::Kind2String(kind),
               Code::ICState2String(ic_state));
-          if (ic_state == MONOMORPHIC) {
-            Code::StubType type = code->type();
-            out.AddFormatted(", %s", Code::StubType2String(type));
-          }
         } else if (kind == Code::STUB || kind == Code::HANDLER) {
           // Get the STUB key and extract major and minor key.
           uint32_t key = code->stub_key();
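
The disassembler hunks above swap bare "%d" conversions for width-correct macros so pointer-difference values print the same on 32- and 64-bit targets. A hedged illustration using the macro as it appears in the diff (`begin` and `pc` assumed in scope):

    // pc - begin has type ptrdiff_t; plain "%d" would truncate on LP64.
    PrintF("offset %4" V8PRIdPTRDIFF "\n", pc - begin);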
diff --git a/src/elements.cc b/src/elements.cc
index 288c60e..6c257ac 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -140,14 +140,11 @@
   if (copy_size == 0) return;
   FixedArray* from = FixedArray::cast(from_base);
   FixedArray* to = FixedArray::cast(to_base);
-  DCHECK(IsFastSmiOrObjectElementsKind(from_kind) ||
-         from_kind == FAST_STRING_WRAPPER_ELEMENTS);
+  DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
   DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
 
   WriteBarrierMode write_barrier_mode =
-      ((IsFastObjectElementsKind(from_kind) &&
-        IsFastObjectElementsKind(to_kind)) ||
-       from_kind == FAST_STRING_WRAPPER_ELEMENTS)
+      (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind))
           ? UPDATE_WRITE_BARRIER
           : SKIP_WRITE_BARRIER;
   for (int i = 0; i < copy_size; i++) {
@@ -466,8 +463,7 @@
 // http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).  We use
 // CRTP to guarantee aggressive compile time optimizations (i.e.  inlining and
 // specialization of SomeElementsAccessor methods).
-template <typename ElementsAccessorSubclass,
-          typename ElementsTraitsParam>
+template <typename Subclass, typename ElementsTraitsParam>
 class ElementsAccessorBase : public ElementsAccessor {
  public:
   explicit ElementsAccessorBase(const char* name)
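
For readers unfamiliar with the pattern described in the comment above, a minimal standalone CRTP sketch (illustrative names only, not V8 code):

    // The base class resolves calls against the derived class at compile
    // time, so there is no virtual dispatch and GetImpl can be inlined.
    template <typename Subclass>
    class AccessorBase {
     public:
      int Get(int i) { return Subclass::GetImpl(i); }
    };
    class MyAccessor : public AccessorBase<MyAccessor> {
     public:
      static int GetImpl(int i) { return i * 2; }
    };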
@@ -495,12 +491,12 @@
     } else {
       length = fixed_array_base->length();
     }
-    ElementsAccessorSubclass::ValidateContents(holder, length);
+    Subclass::ValidateContents(holder, length);
   }
 
   void Validate(Handle<JSObject> holder) final {
     DisallowHeapAllocation no_gc;
-    ElementsAccessorSubclass::ValidateImpl(holder);
+    Subclass::ValidateImpl(holder);
   }
 
   static bool IsPackedImpl(Handle<JSObject> holder,
@@ -508,8 +504,7 @@
                            uint32_t end) {
     if (IsFastPackedElementsKind(kind())) return true;
     for (uint32_t i = start; i < end; i++) {
-      if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store,
-                                                    ALL_PROPERTIES)) {
+      if (!Subclass::HasElementImpl(holder, i, backing_store, ALL_PROPERTIES)) {
         return false;
       }
     }
@@ -520,8 +515,7 @@
     if (!IsHoleyElementsKind(kind())) return;
     int length = Smi::cast(array->length())->value();
     Handle<FixedArrayBase> backing_store(array->elements());
-    if (!ElementsAccessorSubclass::IsPackedImpl(array, backing_store, 0,
-                                                length)) {
+    if (!Subclass::IsPackedImpl(array, backing_store, 0, length)) {
       return;
     }
     ElementsKind packed_kind = GetPackedElementsKind(kind());
@@ -537,20 +531,18 @@
   bool HasElement(Handle<JSObject> holder, uint32_t index,
                   Handle<FixedArrayBase> backing_store,
                   PropertyFilter filter) final {
-    return ElementsAccessorSubclass::HasElementImpl(holder, index,
-                                                    backing_store, filter);
+    return Subclass::HasElementImpl(holder, index, backing_store, filter);
   }
 
   static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
                              Handle<FixedArrayBase> backing_store,
                              PropertyFilter filter) {
-    return ElementsAccessorSubclass::GetEntryForIndexImpl(
-               *holder, *backing_store, index, filter) != kMaxUInt32;
+    return Subclass::GetEntryForIndexImpl(*holder, *backing_store, index,
+                                          filter) != kMaxUInt32;
   }
 
   bool HasAccessors(JSObject* holder) final {
-    return ElementsAccessorSubclass::HasAccessorsImpl(holder,
-                                                      holder->elements());
+    return Subclass::HasAccessorsImpl(holder, holder->elements());
   }
 
   static bool HasAccessorsImpl(JSObject* holder,
@@ -559,11 +551,11 @@
   }
 
   Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
-    return ElementsAccessorSubclass::GetImpl(holder, entry);
+    return Subclass::GetImpl(holder, entry);
   }
 
   static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
-    return ElementsAccessorSubclass::GetImpl(holder->elements(), entry);
+    return Subclass::GetImpl(holder->elements(), entry);
   }
 
   static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
@@ -573,14 +565,13 @@
   }
 
   void Set(Handle<JSObject> holder, uint32_t entry, Object* value) final {
-    ElementsAccessorSubclass::SetImpl(holder, entry, value);
+    Subclass::SetImpl(holder, entry, value);
   }
 
   void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
                    uint32_t entry, Handle<Object> value,
                    PropertyAttributes attributes) final {
-    ElementsAccessorSubclass::ReconfigureImpl(object, store, entry, value,
-                                              attributes);
+    Subclass::ReconfigureImpl(object, store, entry, value, attributes);
   }
 
   static void ReconfigureImpl(Handle<JSObject> object,
@@ -592,8 +583,7 @@
 
   void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
            PropertyAttributes attributes, uint32_t new_capacity) final {
-    ElementsAccessorSubclass::AddImpl(object, index, value, attributes,
-                                      new_capacity);
+    Subclass::AddImpl(object, index, value, attributes, new_capacity);
   }
 
   static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -604,7 +594,7 @@
 
   uint32_t Push(Handle<JSArray> receiver, Arguments* args,
                 uint32_t push_size) final {
-    return ElementsAccessorSubclass::PushImpl(receiver, args, push_size);
+    return Subclass::PushImpl(receiver, args, push_size);
   }
 
   static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
@@ -615,7 +605,7 @@
 
   uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
                    uint32_t unshift_size) final {
-    return ElementsAccessorSubclass::UnshiftImpl(receiver, args, unshift_size);
+    return Subclass::UnshiftImpl(receiver, args, unshift_size);
   }
 
   static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
@@ -626,7 +616,7 @@
 
   Handle<JSArray> Slice(Handle<JSObject> receiver, uint32_t start,
                         uint32_t end) final {
-    return ElementsAccessorSubclass::SliceImpl(receiver, start, end);
+    return Subclass::SliceImpl(receiver, start, end);
   }
 
   static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
@@ -638,8 +628,7 @@
   Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
                          uint32_t delete_count, Arguments* args,
                          uint32_t add_count) final {
-    return ElementsAccessorSubclass::SpliceImpl(receiver, start, delete_count,
-                                                args, add_count);
+    return Subclass::SpliceImpl(receiver, start, delete_count, args, add_count);
   }
 
   static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
@@ -650,7 +639,7 @@
   }
 
   Handle<Object> Pop(Handle<JSArray> receiver) final {
-    return ElementsAccessorSubclass::PopImpl(receiver);
+    return Subclass::PopImpl(receiver);
   }
 
   static Handle<Object> PopImpl(Handle<JSArray> receiver) {
@@ -659,7 +648,7 @@
   }
 
   Handle<Object> Shift(Handle<JSArray> receiver) final {
-    return ElementsAccessorSubclass::ShiftImpl(receiver);
+    return Subclass::ShiftImpl(receiver);
   }
 
   static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
@@ -668,8 +657,8 @@
   }
 
   void SetLength(Handle<JSArray> array, uint32_t length) final {
-    ElementsAccessorSubclass::SetLengthImpl(array->GetIsolate(), array, length,
-                                            handle(array->elements()));
+    Subclass::SetLengthImpl(array->GetIsolate(), array, length,
+                            handle(array->elements()));
   }
 
   static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
@@ -713,7 +702,7 @@
     } else {
       // Check whether the backing store should be expanded.
       capacity = Max(length, JSObject::NewElementsCapacity(capacity));
-      ElementsAccessorSubclass::GrowCapacityAndConvertImpl(array, capacity);
+      Subclass::GrowCapacityAndConvertImpl(array, capacity);
     }
 
     array->set_length(Smi::FromInt(length));
@@ -727,7 +716,7 @@
       return static_cast<uint32_t>(
           Smi::cast(JSArray::cast(receiver)->length())->value());
     }
-    return ElementsAccessorSubclass::GetCapacityImpl(receiver, elements);
+    return Subclass::GetCapacityImpl(receiver, elements);
   }
 
   static Handle<FixedArrayBase> ConvertElementsWithCapacity(
@@ -762,9 +751,8 @@
       packed_size = Smi::cast(JSArray::cast(*object)->length())->value();
     }
 
-    ElementsAccessorSubclass::CopyElementsImpl(
-        *old_elements, src_index, *new_elements, from_kind, dst_index,
-        packed_size, copy_size);
+    Subclass::CopyElementsImpl(*old_elements, src_index, *new_elements,
+                               from_kind, dst_index, packed_size, copy_size);
 
     return new_elements;
   }
@@ -784,12 +772,17 @@
     DCHECK(IsFastDoubleElementsKind(from_kind) !=
                IsFastDoubleElementsKind(kind()) ||
            IsDictionaryElementsKind(from_kind) ||
-           from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
            static_cast<uint32_t>(old_elements->length()) < capacity);
+    Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
+                                              kind(), capacity);
+  }
+
+  static void BasicGrowCapacityAndConvertImpl(
+      Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+      ElementsKind from_kind, ElementsKind to_kind, uint32_t capacity) {
     Handle<FixedArrayBase> elements =
         ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
 
-    ElementsKind to_kind = kind();
     if (IsHoleyElementsKind(from_kind)) to_kind = GetHoleyElementsKind(to_kind);
     Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, to_kind);
     JSObject::SetMapAndElements(object, new_map, elements);
@@ -805,11 +798,11 @@
 
   void GrowCapacityAndConvert(Handle<JSObject> object,
                               uint32_t capacity) final {
-    ElementsAccessorSubclass::GrowCapacityAndConvertImpl(object, capacity);
+    Subclass::GrowCapacityAndConvertImpl(object, capacity);
   }
 
   void Delete(Handle<JSObject> obj, uint32_t entry) final {
-    ElementsAccessorSubclass::DeleteImpl(obj, entry);
+    Subclass::DeleteImpl(obj, entry);
   }
 
   static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
@@ -833,7 +826,7 @@
       }
     }
     FixedArrayBase* from = from_holder->elements();
-    // NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
+    // NOTE: the Subclass::CopyElementsImpl() methods
     // violate the handlified function signature convention:
     // raw pointer parameters in the function that allocates. This is done
     // intentionally to avoid ArrayConcat() builtin performance degradation.
@@ -842,13 +835,12 @@
     // copying from object with fast double elements to object with object
     // elements. In all the other cases there are no allocations performed and
     // handle creation causes noticeable performance degradation of the builtin.
-    ElementsAccessorSubclass::CopyElementsImpl(
-        from, from_start, *to, from_kind, to_start, packed_size, copy_size);
+    Subclass::CopyElementsImpl(from, from_start, *to, from_kind, to_start,
+                               packed_size, copy_size);
   }
 
   Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
-    return ElementsAccessorSubclass::NormalizeImpl(object,
-                                                   handle(object->elements()));
+    return Subclass::NormalizeImpl(object, handle(object->elements()));
   }
 
   static Handle<SeededNumberDictionary> NormalizeImpl(
@@ -861,7 +853,7 @@
                                      Handle<FixedArray> values_or_entries,
                                      bool get_entries, int* nof_items,
                                      PropertyFilter filter) {
-    return ElementsAccessorSubclass::CollectValuesOrEntriesImpl(
+    return Subclass::CollectValuesOrEntriesImpl(
         isolate, object, values_or_entries, get_entries, nof_items, filter);
   }
 
@@ -872,9 +864,8 @@
     int count = 0;
     KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
     accumulator.NextPrototype();
-    ElementsAccessorSubclass::CollectElementIndicesImpl(
-        object, handle(object->elements(), isolate), &accumulator, kMaxUInt32,
-        ALL_PROPERTIES, 0);
+    Subclass::CollectElementIndicesImpl(
+        object, handle(object->elements(), isolate), &accumulator);
     Handle<FixedArray> keys = accumulator.GetKeys();
 
     for (int i = 0; i < keys->length(); ++i) {
@@ -883,15 +874,14 @@
       uint32_t index;
       if (!key->ToUint32(&index)) continue;
 
-      uint32_t entry = ElementsAccessorSubclass::GetEntryForIndexImpl(
+      uint32_t entry = Subclass::GetEntryForIndexImpl(
           *object, object->elements(), index, filter);
       if (entry == kMaxUInt32) continue;
 
-      PropertyDetails details =
-          ElementsAccessorSubclass::GetDetailsImpl(*object, entry);
+      PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
 
       if (details.kind() == kData) {
-        value = ElementsAccessorSubclass::GetImpl(object, entry);
+        value = Subclass::GetImpl(object, entry);
       } else {
         LookupIterator it(isolate, object, index, LookupIterator::OWN);
         ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -909,25 +899,20 @@
 
   void CollectElementIndices(Handle<JSObject> object,
                              Handle<FixedArrayBase> backing_store,
-                             KeyAccumulator* keys, uint32_t range,
-                             PropertyFilter filter, uint32_t offset) final {
-    if (filter & ONLY_ALL_CAN_READ) return;
-    ElementsAccessorSubclass::CollectElementIndicesImpl(
-        object, backing_store, keys, range, filter, offset);
+                             KeyAccumulator* keys) final {
+    if (keys->filter() & ONLY_ALL_CAN_READ) return;
+    Subclass::CollectElementIndicesImpl(object, backing_store, keys);
   }
 
   static void CollectElementIndicesImpl(Handle<JSObject> object,
                                         Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys, uint32_t range,
-                                        PropertyFilter filter,
-                                        uint32_t offset) {
+                                        KeyAccumulator* keys) {
     DCHECK_NE(DICTIONARY_ELEMENTS, kind());
     // Non-dictionary elements can't have all-can-read accessors.
     uint32_t length = GetIterationLength(*object, *backing_store);
-    if (range < length) length = range;
-    for (uint32_t i = offset; i < length; i++) {
-      if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
-                                                   filter)) {
+    PropertyFilter filter = keys->filter();
+    for (uint32_t i = 0; i < length; i++) {
+      if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
         keys->AddKey(i);
       }
     }
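
After this refactor the range/filter/offset parameters are no longer threaded through every call; the KeyAccumulator carries the filter itself. A hedged sketch of the new call shape (construction as in CollectValuesOrEntriesImpl above; `isolate` and `object` assumed in scope):

    KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
    accumulator.NextPrototype();
    object->GetElementsAccessor()->CollectElementIndices(object, &accumulator);
    // Implementations read the filter back via keys->filter().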
@@ -938,11 +923,9 @@
       Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
       PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
       uint32_t insertion_index = 0) {
-    uint32_t length =
-        ElementsAccessorSubclass::GetIterationLength(*object, *backing_store);
+    uint32_t length = Subclass::GetIterationLength(*object, *backing_store);
     for (uint32_t i = 0; i < length; i++) {
-      if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
-                                                   filter)) {
+      if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
         if (convert == CONVERT_TO_STRING) {
           Handle<String> index_string = isolate->factory()->Uint32ToString(i);
           list->set(insertion_index, *index_string);
@@ -961,8 +944,8 @@
                                            Handle<FixedArray> keys,
                                            GetKeysConversion convert,
                                            PropertyFilter filter) final {
-    return ElementsAccessorSubclass::PrependElementIndicesImpl(
-        object, backing_store, keys, convert, filter);
+    return Subclass::PrependElementIndicesImpl(object, backing_store, keys,
+                                               convert, filter);
   }
 
   static Handle<FixedArray> PrependElementIndicesImpl(
@@ -972,14 +955,14 @@
     Isolate* isolate = object->GetIsolate();
     uint32_t nof_property_keys = keys->length();
     uint32_t initial_list_length =
-        ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
+        Subclass::GetCapacityImpl(*object, *backing_store);
     initial_list_length += nof_property_keys;
 
     // Collect the element indices into a new list.
     uint32_t nof_indices = 0;
     Handle<FixedArray> combined_keys =
         isolate->factory()->NewFixedArray(initial_list_length);
-    combined_keys = ElementsAccessorSubclass::DirectCollectElementIndicesImpl(
+    combined_keys = Subclass::DirectCollectElementIndicesImpl(
         isolate, object, backing_store, convert, filter, combined_keys,
         &nof_indices);
 
@@ -1011,13 +994,10 @@
                        &array_length) &&
                    array_length <= Smi::kMaxValue)) {
         // Since we use std::sort above, the GC will no longer know where the
-        // HeapNumbers are, hence we have to write them again.
-        // For Arrays with valid Smi length, we are sure to have no HeapNumber
-        // indices and thus we can skip this step.
-        for (uint32_t i = 0; i < nof_indices; i++) {
-          Object* index = combined_keys->get(i);
-          combined_keys->set(i, index);
-        }
+        // HeapNumbers are.  For Arrays with valid Smi length, we are sure to
+        // have no HeapNumber indices and thus we can skip this step.
+        FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(isolate->heap(), *combined_keys, 0,
+                                           nof_indices);
       }
     }
 
@@ -1038,8 +1018,7 @@
   void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
                                    KeyAccumulator* accumulator,
                                    AddKeyConversion convert) final {
-    ElementsAccessorSubclass::AddElementsToKeyAccumulatorImpl(
-        receiver, accumulator, convert);
+    Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator, convert);
   }
 
   static uint32_t GetCapacityImpl(JSObject* holder,
@@ -1048,7 +1027,7 @@
   }
 
   uint32_t GetCapacity(JSObject* holder, FixedArrayBase* backing_store) final {
-    return ElementsAccessorSubclass::GetCapacityImpl(holder, backing_store);
+    return Subclass::GetCapacityImpl(holder, backing_store);
   }
 
   static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
@@ -1060,8 +1039,7 @@
                                        FixedArrayBase* backing_store,
                                        uint32_t index, PropertyFilter filter) {
     if (IsHoleyElementsKind(kind())) {
-      return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
-                                                               backing_store) &&
+      return index < Subclass::GetCapacityImpl(holder, backing_store) &&
                      !BackingStore::cast(backing_store)->is_the_hole(index)
                  ? index
                  : kMaxUInt32;
@@ -1073,8 +1051,8 @@
 
   uint32_t GetEntryForIndex(JSObject* holder, FixedArrayBase* backing_store,
                             uint32_t index) final {
-    return ElementsAccessorSubclass::GetEntryForIndexImpl(
-        holder, backing_store, index, ALL_PROPERTIES);
+    return Subclass::GetEntryForIndexImpl(holder, backing_store, index,
+                                          ALL_PROPERTIES);
   }
 
   static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -1087,7 +1065,7 @@
   }
 
   PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
-    return ElementsAccessorSubclass::GetDetailsImpl(holder, entry);
+    return Subclass::GetDetailsImpl(holder, entry);
   }
 
  private:
@@ -1333,16 +1311,15 @@
 
   static void CollectElementIndicesImpl(Handle<JSObject> object,
                                         Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys, uint32_t range,
-                                        PropertyFilter filter,
-                                        uint32_t offset) {
-    if (filter & SKIP_STRINGS) return;
+                                        KeyAccumulator* keys) {
+    if (keys->filter() & SKIP_STRINGS) return;
     Isolate* isolate = keys->isolate();
     Handle<Object> undefined = isolate->factory()->undefined_value();
     Handle<Object> the_hole = isolate->factory()->the_hole_value();
     Handle<SeededNumberDictionary> dictionary =
         Handle<SeededNumberDictionary>::cast(backing_store);
     int capacity = dictionary->Capacity();
+    PropertyFilter filter = keys->filter();
     for (int i = 0; i < capacity; i++) {
       uint32_t key =
           GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
@@ -1403,21 +1380,18 @@
 
 
 // Super class for all fast element arrays.
-template<typename FastElementsAccessorSubclass,
-         typename KindTraits>
-class FastElementsAccessor
-    : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
+template <typename Subclass, typename KindTraits>
+class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
  public:
   explicit FastElementsAccessor(const char* name)
-      : ElementsAccessorBase<FastElementsAccessorSubclass,
-                             KindTraits>(name) {}
+      : ElementsAccessorBase<Subclass, KindTraits>(name) {}
 
   typedef typename KindTraits::BackingStore BackingStore;
 
   static Handle<SeededNumberDictionary> NormalizeImpl(
       Handle<JSObject> object, Handle<FixedArrayBase> store) {
     Isolate* isolate = store->GetIsolate();
-    ElementsKind kind = FastElementsAccessorSubclass::kind();
+    ElementsKind kind = Subclass::kind();
 
     // Ensure that notifications fire if the array or object prototypes are
     // normalizing.
@@ -1436,7 +1410,7 @@
       if (IsHoleyElementsKind(kind)) {
         if (BackingStore::cast(*store)->is_the_hole(i)) continue;
       }
-      Handle<Object> value = FastElementsAccessorSubclass::GetImpl(*store, i);
+      Handle<Object> value = Subclass::GetImpl(*store, i);
       dictionary = SeededNumberDictionary::AddNumberEntry(
           dictionary, i, value, details, used_as_prototype);
       j++;
@@ -1453,7 +1427,9 @@
     }
     if (entry == 0) {
       FixedArray* empty = heap->empty_fixed_array();
-      if (obj->HasFastArgumentsElements()) {
+      // Dynamically ask for the elements kind here since we manually redirect
+      // the operations for argument backing stores.
+      if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
         FixedArray::cast(obj->elements())->set(1, empty);
       } else {
         obj->set_elements(empty);
@@ -1538,14 +1514,13 @@
                       uint32_t new_capacity) {
     DCHECK_EQ(NONE, attributes);
     ElementsKind from_kind = object->GetElementsKind();
-    ElementsKind to_kind = FastElementsAccessorSubclass::kind();
+    ElementsKind to_kind = Subclass::kind();
     if (IsDictionaryElementsKind(from_kind) ||
         IsFastDoubleElementsKind(from_kind) !=
             IsFastDoubleElementsKind(to_kind) ||
-        FastElementsAccessorSubclass::GetCapacityImpl(
-            *object, object->elements()) != new_capacity) {
-      FastElementsAccessorSubclass::GrowCapacityAndConvertImpl(object,
-                                                               new_capacity);
+        Subclass::GetCapacityImpl(*object, object->elements()) !=
+            new_capacity) {
+      Subclass::GrowCapacityAndConvertImpl(object, new_capacity);
     } else {
       if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
         JSObject::TransitionElementsKind(object, to_kind);
@@ -1555,7 +1530,7 @@
         JSObject::EnsureWritableFastElements(object);
       }
     }
-    FastElementsAccessorSubclass::SetImpl(object, index, *value);
+    Subclass::SetImpl(object, index, *value);
   }
 
   static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -1578,13 +1553,11 @@
                                               AddKeyConversion convert) {
     Handle<FixedArrayBase> elements(receiver->elements(),
                                     receiver->GetIsolate());
-    uint32_t length =
-        FastElementsAccessorSubclass::GetIterationLength(*receiver, *elements);
+    uint32_t length = Subclass::GetIterationLength(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
       if (IsFastPackedElementsKind(KindTraits::Kind) ||
           HasEntryImpl(*elements, i)) {
-        accumulator->AddKey(FastElementsAccessorSubclass::GetImpl(*elements, i),
-                            convert);
+        accumulator->AddKey(Subclass::GetImpl(*elements, i), convert);
       }
     }
   }
@@ -1592,16 +1565,20 @@
   static void ValidateContents(Handle<JSObject> holder, int length) {
 #if DEBUG
     Isolate* isolate = holder->GetIsolate();
+    Heap* heap = isolate->heap();
     HandleScope scope(isolate);
     Handle<FixedArrayBase> elements(holder->elements(), isolate);
     Map* map = elements->map();
-    DCHECK((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
-            (map == isolate->heap()->fixed_array_map() ||
-             map == isolate->heap()->fixed_cow_array_map())) ||
-           (IsFastDoubleElementsKind(KindTraits::Kind) ==
-            ((map == isolate->heap()->fixed_array_map() && length == 0) ||
-             map == isolate->heap()->fixed_double_array_map())));
+    if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+      DCHECK_NE(map, heap->fixed_double_array_map());
+    } else if (IsFastDoubleElementsKind(KindTraits::Kind)) {
+      DCHECK_NE(map, heap->fixed_cow_array_map());
+      if (map == heap->fixed_array_map()) DCHECK_EQ(0, length);
+    } else {
+      UNREACHABLE();
+    }
     if (length == 0) return;  // nothing to do!
+#if ENABLE_SLOW_DCHECKS
     DisallowHeapAllocation no_gc;
     Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
     if (IsFastSmiElementsKind(KindTraits::Kind)) {
@@ -1610,30 +1587,38 @@
                (IsFastHoleyElementsKind(KindTraits::Kind) &&
                 backing_store->is_the_hole(i)));
       }
+    } else if (KindTraits::Kind == FAST_ELEMENTS ||
+               KindTraits::Kind == FAST_DOUBLE_ELEMENTS) {
+      for (int i = 0; i < length; i++) {
+        DCHECK(!backing_store->is_the_hole(i));
+      }
+    } else {
+      DCHECK(IsFastHoleyElementsKind(KindTraits::Kind));
     }
 #endif
+#endif
   }
 
   static Handle<Object> PopImpl(Handle<JSArray> receiver) {
-    return FastElementsAccessorSubclass::RemoveElement(receiver, AT_END);
+    return Subclass::RemoveElement(receiver, AT_END);
   }
 
   static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
-    return FastElementsAccessorSubclass::RemoveElement(receiver, AT_START);
+    return Subclass::RemoveElement(receiver, AT_START);
   }
 
   static uint32_t PushImpl(Handle<JSArray> receiver,
                            Arguments* args, uint32_t push_size) {
     Handle<FixedArrayBase> backing_store(receiver->elements());
-    return FastElementsAccessorSubclass::AddArguments(receiver, backing_store,
-                                                      args, push_size, AT_END);
+    return Subclass::AddArguments(receiver, backing_store, args, push_size,
+                                  AT_END);
   }
 
   static uint32_t UnshiftImpl(Handle<JSArray> receiver,
                               Arguments* args, uint32_t unshift_size) {
     Handle<FixedArrayBase> backing_store(receiver->elements());
-    return FastElementsAccessorSubclass::AddArguments(
-        receiver, backing_store, args, unshift_size, AT_START);
+    return Subclass::AddArguments(receiver, backing_store, args, unshift_size,
+                                  AT_START);
   }
 
   static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
@@ -1644,11 +1629,10 @@
     Handle<JSArray> result_array = isolate->factory()->NewJSArray(
         KindTraits::Kind, result_len, result_len);
     DisallowHeapAllocation no_gc;
-    FastElementsAccessorSubclass::CopyElementsImpl(
-        *backing_store, start, result_array->elements(), KindTraits::Kind, 0,
-        kPackedSizeNotKnown, result_len);
-    FastElementsAccessorSubclass::TryTransitionResultArrayToPacked(
-        result_array);
+    Subclass::CopyElementsImpl(*backing_store, start, result_array->elements(),
+                               KindTraits::Kind, 0, kPackedSizeNotKnown,
+                               result_len);
+    Subclass::TryTransitionResultArrayToPacked(result_array);
     return result_array;
   }
 
@@ -1681,29 +1665,26 @@
         KindTraits::Kind, delete_count, delete_count);
     if (delete_count > 0) {
       DisallowHeapAllocation no_gc;
-      FastElementsAccessorSubclass::CopyElementsImpl(
-          *backing_store, start, deleted_elements->elements(), KindTraits::Kind,
-          0, kPackedSizeNotKnown, delete_count);
+      Subclass::CopyElementsImpl(*backing_store, start,
+                                 deleted_elements->elements(), KindTraits::Kind,
+                                 0, kPackedSizeNotKnown, delete_count);
     }
 
     // Delete and move elements to make space for add_count new elements.
     if (add_count < delete_count) {
-      FastElementsAccessorSubclass::SpliceShrinkStep(
-          isolate, receiver, backing_store, start, delete_count, add_count,
-          length, new_length);
+      Subclass::SpliceShrinkStep(isolate, receiver, backing_store, start,
+                                 delete_count, add_count, length, new_length);
     } else if (add_count > delete_count) {
-      backing_store = FastElementsAccessorSubclass::SpliceGrowStep(
-          isolate, receiver, backing_store, start, delete_count, add_count,
-          length, new_length);
+      backing_store =
+          Subclass::SpliceGrowStep(isolate, receiver, backing_store, start,
+                                   delete_count, add_count, length, new_length);
     }
 
     // Copy over the arguments.
-    FastElementsAccessorSubclass::CopyArguments(args, backing_store, add_count,
-                                                3, start);
+    Subclass::CopyArguments(args, backing_store, add_count, 3, start);
 
     receiver->set_length(Smi::FromInt(new_length));
-    FastElementsAccessorSubclass::TryTransitionResultArrayToPacked(
-        deleted_elements);
+    Subclass::TryTransitionResultArrayToPacked(deleted_elements);
     return deleted_elements;
   }
 
@@ -1715,8 +1696,7 @@
     uint32_t length = object->elements()->length();
     for (uint32_t index = 0; index < length; ++index) {
       if (!HasEntryImpl(object->elements(), index)) continue;
-      Handle<Object> value =
-          FastElementsAccessorSubclass::GetImpl(object->elements(), index);
+      Handle<Object> value = Subclass::GetImpl(object->elements(), index);
       if (get_entries) {
         value = MakeEntryPair(isolate, index, value);
       }
@@ -1765,9 +1745,9 @@
                                uint32_t new_length) {
     const int move_left_count = len - delete_count - start;
     const int move_left_dst_index = start + add_count;
-    FastElementsAccessorSubclass::MoveElements(
-        isolate, receiver, backing_store, move_left_dst_index,
-        start + delete_count, move_left_count, new_length, len);
+    Subclass::MoveElements(isolate, receiver, backing_store,
+                           move_left_dst_index, start + delete_count,
+                           move_left_count, new_length, len);
   }
 
   // SpliceGrowStep might modify the backing_store.
@@ -1780,23 +1760,22 @@
     DCHECK((add_count - delete_count) <= (Smi::kMaxValue - length));
     // Check if backing_store is big enough.
     if (new_length <= static_cast<uint32_t>(backing_store->length())) {
-      FastElementsAccessorSubclass::MoveElements(
-          isolate, receiver, backing_store, start + add_count,
-          start + delete_count, (length - delete_count - start), 0, 0);
+      Subclass::MoveElements(isolate, receiver, backing_store,
+                             start + add_count, start + delete_count,
+                             (length - delete_count - start), 0, 0);
       // MoveElements updates the backing_store in-place.
       return backing_store;
     }
     // New backing storage is needed.
     int capacity = JSObject::NewElementsCapacity(new_length);
     // Partially copy all elements up to start.
-    Handle<FixedArrayBase> new_elms =
-        FastElementsAccessorSubclass::ConvertElementsWithCapacity(
-            receiver, backing_store, KindTraits::Kind, capacity, start);
+    Handle<FixedArrayBase> new_elms = Subclass::ConvertElementsWithCapacity(
+        receiver, backing_store, KindTraits::Kind, capacity, start);
     // Copy the trailing elements after start + delete_count
-    FastElementsAccessorSubclass::CopyElementsImpl(
-        *backing_store, start + delete_count, *new_elms, KindTraits::Kind,
-        start + add_count, kPackedSizeNotKnown,
-        ElementsAccessor::kCopyToEndAndInitializeToHole);
+    Subclass::CopyElementsImpl(*backing_store, start + delete_count, *new_elms,
+                               KindTraits::Kind, start + add_count,
+                               kPackedSizeNotKnown,
+                               ElementsAccessor::kCopyToEndAndInitializeToHole);
     receiver->set_elements(*new_elms);
     return new_elms;
   }
@@ -1815,14 +1794,12 @@
     DCHECK(length > 0);
     int new_length = length - 1;
     int remove_index = remove_position == AT_START ? 0 : new_length;
-    Handle<Object> result =
-        FastElementsAccessorSubclass::GetImpl(*backing_store, remove_index);
+    Handle<Object> result = Subclass::GetImpl(*backing_store, remove_index);
     if (remove_position == AT_START) {
-      FastElementsAccessorSubclass::MoveElements(
-          isolate, receiver, backing_store, 0, 1, new_length, 0, 0);
+      Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
+                             0, 0);
     }
-    FastElementsAccessorSubclass::SetLengthImpl(isolate, receiver, new_length,
-                                                backing_store);
+    Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
 
     if (IsHoleyElementsKind(kind) && result->IsTheHole()) {
       return isolate->factory()->undefined_value();
@@ -1833,7 +1810,7 @@
   static uint32_t AddArguments(Handle<JSArray> receiver,
                                Handle<FixedArrayBase> backing_store,
                                Arguments* args, uint32_t add_size,
-                               Where remove_position) {
+                               Where add_position) {
     uint32_t length = Smi::cast(receiver->length())->value();
     DCHECK(0 < add_size);
     uint32_t elms_len = backing_store->length();
@@ -1845,24 +1822,23 @@
       // New backing storage is needed.
       uint32_t capacity = JSObject::NewElementsCapacity(new_length);
       // If we add arguments to the start we have to shift the existing objects.
-      int copy_dst_index = remove_position == AT_START ? add_size : 0;
+      int copy_dst_index = add_position == AT_START ? add_size : 0;
       // Copy over all objects to a new backing_store.
-      backing_store = FastElementsAccessorSubclass::ConvertElementsWithCapacity(
+      backing_store = Subclass::ConvertElementsWithCapacity(
           receiver, backing_store, KindTraits::Kind, capacity, 0,
           copy_dst_index, ElementsAccessor::kCopyToEndAndInitializeToHole);
       receiver->set_elements(*backing_store);
-    } else if (remove_position == AT_START) {
+    } else if (add_position == AT_START) {
       // If the backing store has enough capacity and we add elements to the
       // start we have to shift the existing objects.
       Isolate* isolate = receiver->GetIsolate();
-      FastElementsAccessorSubclass::MoveElements(
-          isolate, receiver, backing_store, add_size, 0, length, 0, 0);
+      Subclass::MoveElements(isolate, receiver, backing_store, add_size, 0,
+                             length, 0, 0);
     }
 
-    int insertion_index = remove_position == AT_START ? 0 : length;
+    int insertion_index = add_position == AT_START ? 0 : length;
     // Copy the arguments to the start.
-    FastElementsAccessorSubclass::CopyArguments(args, backing_store, add_size,
-                                                1, insertion_index);
+    Subclass::CopyArguments(args, backing_store, add_size, 1, insertion_index);
     // Set the length.
     receiver->set_length(Smi::FromInt(new_length));
     return new_length;
@@ -1876,22 +1852,19 @@
     FixedArrayBase* raw_backing_store = *dst_store;
     WriteBarrierMode mode = raw_backing_store->GetWriteBarrierMode(no_gc);
     for (uint32_t i = 0; i < copy_size; i++) {
-      Object* argument = (*args)[i + src_index];
-      FastElementsAccessorSubclass::SetImpl(raw_backing_store, i + dst_index,
-                                            argument, mode);
+      Object* argument = (*args)[src_index + i];
+      DCHECK(!argument->IsTheHole());
+      Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
     }
   }
 };
 
-
-template<typename FastElementsAccessorSubclass,
-         typename KindTraits>
+template <typename Subclass, typename KindTraits>
 class FastSmiOrObjectElementsAccessor
-    : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
+    : public FastElementsAccessor<Subclass, KindTraits> {
  public:
   explicit FastSmiOrObjectElementsAccessor(const char* name)
-      : FastElementsAccessor<FastElementsAccessorSubclass,
-                             KindTraits>(name) {}
+      : FastElementsAccessor<Subclass, KindTraits>(name) {}
 
   static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
                              Object* value) {
@@ -1909,8 +1882,7 @@
   }
 
   static Object* GetRaw(FixedArray* backing_store, uint32_t entry) {
-    uint32_t index = FastElementsAccessorSubclass::GetIndexForEntryImpl(
-        backing_store, entry);
+    uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry);
     return backing_store->get(index);
   }
 
@@ -1931,7 +1903,6 @@
       case FAST_HOLEY_SMI_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_HOLEY_ELEMENTS:
-      case FAST_STRING_WRAPPER_ELEMENTS:
         CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
                                    to_start, copy_size);
         break;
@@ -1943,12 +1914,13 @@
         break;
       }
       case DICTIONARY_ELEMENTS:
-      case SLOW_STRING_WRAPPER_ELEMENTS:
         CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
                                        copy_size);
         break;
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
       TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
@@ -2010,15 +1982,12 @@
           ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
 };
 
-
-template<typename FastElementsAccessorSubclass,
-         typename KindTraits>
+template <typename Subclass, typename KindTraits>
 class FastDoubleElementsAccessor
-    : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
+    : public FastElementsAccessor<Subclass, KindTraits> {
  public:
   explicit FastDoubleElementsAccessor(const char* name)
-      : FastElementsAccessor<FastElementsAccessorSubclass,
-                             KindTraits>(name) {}
+      : FastElementsAccessor<Subclass, KindTraits>(name) {}
 
   static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
     return GetImpl(holder->elements(), entry);
@@ -2239,16 +2208,12 @@
 TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
 #undef FIXED_ELEMENTS_ACCESSOR
 
-
-template <typename SloppyArgumentsElementsAccessorSubclass,
-          typename ArgumentsAccessor, typename KindTraits>
+template <typename Subclass, typename ArgumentsAccessor, typename KindTraits>
 class SloppyArgumentsElementsAccessor
-    : public ElementsAccessorBase<SloppyArgumentsElementsAccessorSubclass,
-                                  KindTraits> {
+    : public ElementsAccessorBase<Subclass, KindTraits> {
  public:
   explicit SloppyArgumentsElementsAccessor(const char* name)
-      : ElementsAccessorBase<SloppyArgumentsElementsAccessorSubclass,
-                             KindTraits>(name) {
+      : ElementsAccessorBase<Subclass, KindTraits>(name) {
     USE(KindTraits::Kind);
   }
 
@@ -2384,7 +2349,7 @@
     FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
     uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
                                                              index, filter);
-    if (entry == kMaxUInt32) return entry;
+    if (entry == kMaxUInt32) return kMaxUInt32;
     return (parameter_map->length() - 2) + entry;
   }
 
@@ -2414,31 +2379,24 @@
       // would enable GC of the context.
       parameter_map->set_the_hole(entry + 2);
     } else {
-      SloppyArgumentsElementsAccessorSubclass::DeleteFromArguments(
-          obj, entry - length);
+      Subclass::DeleteFromArguments(obj, entry - length);
     }
   }
 
   static void CollectElementIndicesImpl(Handle<JSObject> object,
                                         Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys, uint32_t range,
-                                        PropertyFilter filter,
-                                        uint32_t offset) {
+                                        KeyAccumulator* keys) {
     FixedArray* parameter_map = FixedArray::cast(*backing_store);
     uint32_t length = parameter_map->length() - 2;
-    if (range < length) length = range;
-
-    for (uint32_t i = offset; i < length; ++i) {
+    for (uint32_t i = 0; i < length; ++i) {
       if (!parameter_map->get(i + 2)->IsTheHole()) {
         keys->AddKey(i);
       }
     }
 
     Handle<FixedArrayBase> store(FixedArrayBase::cast(parameter_map->get(1)));
-    ArgumentsAccessor::CollectElementIndicesImpl(object, store, keys, range,
-                                                 filter, offset);
-    if (SloppyArgumentsElementsAccessorSubclass::kind() ==
-        FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
+    ArgumentsAccessor::CollectElementIndicesImpl(object, store, keys);
+    if (Subclass::kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
       keys->SortCurrentElementsList();
     }
   }
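
The arithmetic above (get(1), length() - 2, entry + 2) reflects the sloppy-arguments backing-store layout, summarized here as a hedged comment sketch:

    // "Parameter map" FixedArray layout assumed by the code above:
    //   slot 0            : the enclosing Context
    //   slot 1            : the real arguments backing store (a FixedArray)
    //   slots 2 .. n + 1  : mapped parameter entries (the-hole when unmapped)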
@@ -2568,16 +2526,45 @@
             FastHoleyObjectElementsAccessor,
             ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
 
+  static Handle<FixedArray> GetArguments(Isolate* isolate,
+                                         FixedArrayBase* backing_store) {
+    FixedArray* parameter_map = FixedArray::cast(backing_store);
+    return Handle<FixedArray>(FixedArray::cast(parameter_map->get(1)), isolate);
+  }
+
+  static Handle<JSArray> SliceImpl(Handle<JSObject> receiver, uint32_t start,
+                                   uint32_t end) {
+    Isolate* isolate = receiver->GetIsolate();
+    uint32_t result_len = end < start ? 0u : end - start;
+    Handle<JSArray> result_array = isolate->factory()->NewJSArray(
+        FAST_HOLEY_ELEMENTS, result_len, result_len);
+    DisallowHeapAllocation no_gc;
+    FixedArray* elements = FixedArray::cast(result_array->elements());
+    FixedArray* parameters = FixedArray::cast(receiver->elements());
+    uint32_t insertion_index = 0;
+    for (uint32_t i = start; i < end; i++) {
+      uint32_t entry =
+          GetEntryForIndexImpl(*receiver, parameters, i, ALL_PROPERTIES);
+      if (entry != kMaxUInt32 && HasEntryImpl(parameters, entry)) {
+        elements->set(insertion_index, *GetImpl(parameters, entry));
+      } else {
+        elements->set_the_hole(insertion_index);
+      }
+      insertion_index++;
+    }
+    return result_array;
+  }
+
   static Handle<SeededNumberDictionary> NormalizeImpl(
       Handle<JSObject> object, Handle<FixedArrayBase> elements) {
-    FixedArray* parameter_map = FixedArray::cast(*elements);
-    Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
+    Handle<FixedArray> arguments =
+        GetArguments(elements->GetIsolate(), *elements);
     return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
   }
 
   static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
-    FixedArray* parameter_map = FixedArray::cast(obj->elements());
-    Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
+    Handle<FixedArray> arguments =
+        GetArguments(obj->GetIsolate(), obj->elements());
     FastHoleyObjectElementsAccessor::DeleteCommon(obj, entry, arguments);
   }
 
@@ -2650,15 +2637,12 @@
   }
 };
 
-template <typename StringWrapperElementsAccessorSubclass,
-          typename BackingStoreAccessor, typename KindTraits>
+template <typename Subclass, typename BackingStoreAccessor, typename KindTraits>
 class StringWrapperElementsAccessor
-    : public ElementsAccessorBase<StringWrapperElementsAccessorSubclass,
-                                  KindTraits> {
+    : public ElementsAccessorBase<Subclass, KindTraits> {
  public:
   explicit StringWrapperElementsAccessor(const char* name)
-      : ElementsAccessorBase<StringWrapperElementsAccessorSubclass, KindTraits>(
-            name) {
+      : ElementsAccessorBase<Subclass, KindTraits>(name) {
     USE(KindTraits::Kind);
   }
 
@@ -2722,8 +2706,7 @@
         (object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS ||
          BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) !=
              new_capacity)) {
-      StringWrapperElementsAccessorSubclass::GrowCapacityAndConvertImpl(
-          object, new_capacity);
+      GrowCapacityAndConvertImpl(object, new_capacity);
     }
     BackingStoreAccessor::AddImpl(object, index, value, attributes,
                                   new_capacity);
@@ -2760,23 +2743,41 @@
 
   static void CollectElementIndicesImpl(Handle<JSObject> object,
                                         Handle<FixedArrayBase> backing_store,
-                                        KeyAccumulator* keys, uint32_t range,
-                                        PropertyFilter filter,
-                                        uint32_t offset) {
+                                        KeyAccumulator* keys) {
     uint32_t length = GetString(*object)->length();
     for (uint32_t i = 0; i < length; i++) {
       keys->AddKey(i);
     }
-    BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store, keys,
-                                                    range, filter, offset);
+    BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store,
+                                                    keys);
+  }
+
+  static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+                                         uint32_t capacity) {
+    Handle<FixedArrayBase> old_elements(object->elements());
+    ElementsKind from_kind = object->GetElementsKind();
+    // This method should only be called if there's a reason to update the
+    // elements.
+    DCHECK(from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
+           static_cast<uint32_t>(old_elements->length()) < capacity);
+    Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
+                                              FAST_STRING_WRAPPER_ELEMENTS,
+                                              capacity);
   }
 
   static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
                                FixedArrayBase* to, ElementsKind from_kind,
                                uint32_t to_start, int packed_size,
                                int copy_size) {
-    BackingStoreAccessor::CopyElementsImpl(from, from_start, to, from_kind,
-                                           to_start, packed_size, copy_size);
+    DCHECK(!to->IsDictionary());
+    if (from_kind == SLOW_STRING_WRAPPER_ELEMENTS) {
+      CopyDictionaryToObjectElements(from, from_start, to, FAST_HOLEY_ELEMENTS,
+                                     to_start, copy_size);
+    } else {
+      DCHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, from_kind);
+      CopyObjectToObjectElements(from, FAST_HOLEY_ELEMENTS, from_start, to,
+                                 FAST_HOLEY_ELEMENTS, to_start, copy_size);
+    }
   }
 
  private:
@@ -2914,7 +2915,7 @@
   }
 
   // Fill in the content
-  switch (array->GetElementsKind()) {
+  switch (elements_kind) {
     case FAST_HOLEY_SMI_ELEMENTS:
     case FAST_SMI_ELEMENTS: {
       Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
@@ -2975,31 +2976,17 @@
   elements_accessors_ = NULL;
 }
 
-
 Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
-                                         uint32_t concat_size) {
-  const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
-  STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
-  USE(kHalfOfMaxInt);
-  uint32_t result_len = 0;
-  bool has_raw_doubles = false;
+                                         uint32_t concat_size,
+                                         uint32_t result_len) {
   ElementsKind result_elements_kind = GetInitialFastElementsKind();
+  bool has_raw_doubles = false;
   {
     DisallowHeapAllocation no_gc;
     bool is_holey = false;
-    // Iterate through all the arguments performing checks
-    // and calculating total length.
     for (uint32_t i = 0; i < concat_size; i++) {
-      JSArray* array = JSArray::cast((*args)[i]);
-      uint32_t len = 0;
-      array->length()->ToArrayLength(&len);
-
-      // We shouldn't overflow when adding another len.
-      result_len += len;
-      DCHECK(0 <= result_len);
-      DCHECK(result_len <= FixedDoubleArray::kMaxLength);
-
-      ElementsKind arg_kind = array->GetElementsKind();
+      Object* arg = (*args)[i];
+      ElementsKind arg_kind = JSArray::cast(arg)->GetElementsKind();
       has_raw_doubles = has_raw_doubles || IsFastDoubleElementsKind(arg_kind);
       is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
       result_elements_kind =
diff --git a/src/elements.h b/src/elements.h
index 2b18ab0..9f70f2a 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -81,18 +81,12 @@
   // whose PropertyAttribute match |filter|.
   virtual void CollectElementIndices(Handle<JSObject> object,
                                      Handle<FixedArrayBase> backing_store,
-                                     KeyAccumulator* keys,
-                                     uint32_t range = kMaxUInt32,
-                                     PropertyFilter filter = ALL_PROPERTIES,
-                                     uint32_t offset = 0) = 0;
+                                     KeyAccumulator* keys) = 0;
 
   inline void CollectElementIndices(Handle<JSObject> object,
-                                    KeyAccumulator* keys,
-                                    uint32_t range = kMaxUInt32,
-                                    PropertyFilter filter = ALL_PROPERTIES,
-                                    uint32_t offset = 0) {
-    CollectElementIndices(object, handle(object->elements()), keys, range,
-                          filter, offset);
+                                    KeyAccumulator* keys) {
+    CollectElementIndices(object, handle(object->elements(), keys->isolate()),
+                          keys);
   }
 
   virtual Maybe<bool> CollectValuesOrEntries(
@@ -100,7 +94,6 @@
       Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
       PropertyFilter filter = ALL_PROPERTIES) = 0;
 
-  //
   virtual Handle<FixedArray> PrependElementIndices(
       Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
       Handle<FixedArray> keys, GetKeysConversion convert,
@@ -135,7 +128,7 @@
                    uint32_t new_capacity) = 0;
 
   static Handle<JSArray> Concat(Isolate* isolate, Arguments* args,
-                                uint32_t concat_size);
+                                uint32_t concat_size, uint32_t result_length);
 
   virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
                         uint32_t push_size) = 0;
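
Concat now expects its caller to have validated the arguments and precomputed the combined length instead of doing both itself. A hedged sketch of the adjusted call site (the real caller is the ArrayConcat builtin; overflow checking elided):

    uint32_t result_len = 0;
    for (uint32_t i = 0; i < concat_size; i++) {
      uint32_t len = 0;
      JSArray::cast((*args)[i])->length()->ToArrayLength(&len);
      result_len += len;  // caller must rule out overflow beforehand
    }
    Handle<JSArray> result =
        ElementsAccessor::Concat(isolate, args, concat_size, result_len);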
diff --git a/src/execution.cc b/src/execution.cc
index a092a8a..37e41d5 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -94,6 +94,7 @@
     if (FLAG_profile_deserialization && target->IsJSFunction()) {
       PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
     }
+    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::JS_Execution);
     value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
                                 argc, argv);
   }
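
The RuntimeCallTimerScope added above is an RAII scope that attributes the
time spent in generated code to a RuntimeCallStats counter. As a minimal,
self-contained sketch of that pattern (the CallCounter type and the clock
bookkeeping are illustrative assumptions, not V8's implementation):

#include <chrono>
#include <cstdio>

struct CallCounter {
  const char* name;
  long calls = 0;
  std::chrono::nanoseconds total{0};
};

class TimerScope {
 public:
  explicit TimerScope(CallCounter* counter)
      : counter_(counter), start_(std::chrono::steady_clock::now()) {}
  ~TimerScope() {  // on scope exit, charge the elapsed time to the counter
    counter_->calls++;
    counter_->total += std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now() - start_);
  }

 private:
  CallCounter* counter_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  static CallCounter js_execution{"JS_Execution"};
  {
    TimerScope timer(&js_execution);  // brackets the generated-code call
  }
  std::printf("%s: %ld call(s)\n", js_execution.name, js_execution.calls);
}
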
diff --git a/src/extensions/ignition-statistics-extension.cc b/src/extensions/ignition-statistics-extension.cc
new file mode 100644
index 0000000..b22c599
--- /dev/null
+++ b/src/extensions/ignition-statistics-extension.cc
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/extensions/ignition-statistics-extension.h"
+
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Local<v8::FunctionTemplate>
+IgnitionStatisticsExtension::GetNativeFunctionTemplate(
+    v8::Isolate* isolate, v8::Local<v8::String> name) {
+  DCHECK_EQ(strcmp(*v8::String::Utf8Value(name), "getIgnitionDispatchCounters"),
+            0);
+  return v8::FunctionTemplate::New(
+      isolate, IgnitionStatisticsExtension::GetIgnitionDispatchCounters);
+}
+
+const char* const IgnitionStatisticsExtension::kSource =
+    "native function getIgnitionDispatchCounters();";
+
+void IgnitionStatisticsExtension::GetIgnitionDispatchCounters(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  DCHECK_EQ(args.Length(), 0);
+  DCHECK(FLAG_trace_ignition_dispatches);
+  args.GetReturnValue().Set(reinterpret_cast<Isolate*>(args.GetIsolate())
+                                ->interpreter()
+                                ->GetDispatchCountersObject());
+}
+
+}  // namespace internal
+}  // namespace v8
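
The extension above exposes the interpreter's bytecode dispatch counters to
JavaScript. Conceptually, the underlying data is a matrix of counts indexed
by (current bytecode, next bytecode). A stand-alone sketch of such a counter
table (the Bytecode enum is a stand-in; V8's real bytecode list is far
larger):

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum class Bytecode : uint8_t { kLdaZero, kAdd, kReturn, kCount };

class DispatchCounters {
 public:
  void Record(Bytecode from, Bytecode to) {
    counters_[Index(from)][Index(to)]++;
  }
  uintptr_t Get(Bytecode from, Bytecode to) const {
    return counters_[Index(from)][Index(to)];
  }

 private:
  static size_t Index(Bytecode b) { return static_cast<size_t>(b); }
  static constexpr size_t kTableSize = static_cast<size_t>(Bytecode::kCount);
  std::array<std::array<uintptr_t, kTableSize>, kTableSize> counters_{};
};

int main() {
  DispatchCounters counters;
  counters.Record(Bytecode::kLdaZero, Bytecode::kAdd);  // LdaZero -> Add
  counters.Record(Bytecode::kAdd, Bytecode::kReturn);   // Add -> Return
  std::printf("LdaZero->Add: %ju\n",
              static_cast<uintmax_t>(
                  counters.Get(Bytecode::kLdaZero, Bytecode::kAdd)));
}
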
diff --git a/src/extensions/ignition-statistics-extension.h b/src/extensions/ignition-statistics-extension.h
new file mode 100644
index 0000000..fee55f6
--- /dev/null
+++ b/src/extensions/ignition-statistics-extension.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
+#define V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class IgnitionStatisticsExtension : public v8::Extension {
+ public:
+  IgnitionStatisticsExtension()
+      : v8::Extension("v8/ignition-statistics", kSource) {}
+
+  v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+      v8::Isolate* isolate, v8::Local<v8::String> name) override;
+
+  static void GetIgnitionDispatchCounters(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+  static const char* const kSource;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
index 76dcd43..e6649a6 100644
--- a/src/extensions/statistics-extension.cc
+++ b/src/extensions/statistics-extension.cc
@@ -116,7 +116,7 @@
   };
 
   const StatisticNumber numbers[] = {
-      {isolate->memory_allocator()->Size(), "total_committed_bytes"},
+      {heap->memory_allocator()->Size(), "total_committed_bytes"},
       {heap->new_space()->Size(), "new_space_live_bytes"},
       {heap->new_space()->Available(), "new_space_available_bytes"},
       {heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
diff --git a/src/external-reference-table.cc b/src/external-reference-table.cc
index 29a2474..6b8b7d8 100644
--- a/src/external-reference-table.cc
+++ b/src/external-reference-table.cc
@@ -59,6 +59,8 @@
   Add(ExternalReference::isolate_address(isolate).address(), "isolate");
   Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
       "Interpreter::dispatch_table_address");
+  Add(ExternalReference::interpreter_dispatch_counters(isolate).address(),
+      "Interpreter::interpreter_dispatch_counters");
   Add(ExternalReference::address_of_negative_infinity().address(),
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function(isolate).address(),
@@ -134,6 +136,14 @@
       "wasm::uint64_div");
   Add(ExternalReference::wasm_uint64_mod(isolate).address(),
       "wasm::uint64_mod");
+  Add(ExternalReference::wasm_word32_ctz(isolate).address(),
+      "wasm::word32_ctz");
+  Add(ExternalReference::wasm_word64_ctz(isolate).address(),
+      "wasm::word64_ctz");
+  Add(ExternalReference::wasm_word32_popcnt(isolate).address(),
+      "wasm::word32_popcnt");
+  Add(ExternalReference::wasm_word64_popcnt(isolate).address(),
+      "wasm::word64_popcnt");
   Add(ExternalReference::f64_acos_wrapper_function(isolate).address(),
       "f64_acos_wrapper");
   Add(ExternalReference::f64_asin_wrapper_function(isolate).address(),
@@ -295,19 +305,27 @@
     const char* name;
   };
 
-  static const AccessorRefTable accessors[] = {
+  static const AccessorRefTable getters[] = {
 #define ACCESSOR_INFO_DECLARATION(name) \
   {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
       ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
+  };
+  static const AccessorRefTable setters[] = {
 #define ACCESSOR_SETTER_DECLARATION(name) \
   {FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
-          ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+      ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
   };
 
-  for (unsigned i = 0; i < arraysize(accessors); ++i) {
-    Add(accessors[i].address, accessors[i].name);
+  for (unsigned i = 0; i < arraysize(getters); ++i) {
+    Add(getters[i].address, getters[i].name);
+    Add(AccessorInfo::redirect(isolate, getters[i].address, ACCESSOR_GETTER),
+        "");
+  }
+
+  for (unsigned i = 0; i < arraysize(setters); ++i) {
+    Add(setters[i].address, setters[i].name);
   }
 
   StubCache* stub_cache = isolate->stub_cache();
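
Splitting the accessor table into getters and setters lets each getter be
registered twice: once at its direct address and once at the redirected
address produced by AccessorInfo::redirect (the thunk used when the getter is
invoked through the API). A schematic of that double registration; Redirect
here is a placeholder, not the real redirection logic:

#include <cstdint>
#include <string>
#include <vector>

using Address = uintptr_t;

struct Entry {
  Address address;
  std::string name;
};

// Placeholder for AccessorInfo::redirect: maps a getter to its call thunk.
Address Redirect(Address getter) { return getter + 0x10; }

int main() {
  std::vector<Entry> table;
  const Entry getters[] = {{0x1000, "Accessors::ArrayLengthGetter"}};
  for (const Entry& g : getters) {
    table.push_back(g);                          // the getter itself, by name
    table.push_back({Redirect(g.address), ""});  // its redirected thunk
  }
  return table.size() == 2 ? 0 : 1;  // two table entries per getter
}
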
diff --git a/src/factory.cc b/src/factory.cc
index 41c3cb5..7d2dad0 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -190,8 +190,8 @@
 Handle<AccessorPair> Factory::NewAccessorPair() {
   Handle<AccessorPair> accessors =
       Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE));
-  accessors->set_getter(*the_hole_value(), SKIP_WRITE_BARRIER);
-  accessors->set_setter(*the_hole_value(), SKIP_WRITE_BARRIER);
+  accessors->set_getter(*null_value(), SKIP_WRITE_BARRIER);
+  accessors->set_setter(*null_value(), SKIP_WRITE_BARRIER);
   return accessors;
 }
 
@@ -853,15 +853,6 @@
 }
 
 
-Handle<CodeCache> Factory::NewCodeCache() {
-  Handle<CodeCache> code_cache =
-      Handle<CodeCache>::cast(NewStruct(CODE_CACHE_TYPE));
-  code_cache->set_default_cache(*empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code_cache->set_normal_type_cache(*undefined_value(), SKIP_WRITE_BARRIER);
-  return code_cache;
-}
-
-
 Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
     int aliased_context_slot) {
   Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
@@ -894,7 +885,7 @@
   script->set_wrapper(heap->undefined_value());
   script->set_line_ends(heap->undefined_value());
   script->set_eval_from_shared(heap->undefined_value());
-  script->set_eval_from_instructions_offset(0);
+  script->set_eval_from_position(0);
   script->set_shared_function_infos(Smi::FromInt(0));
   script->set_flags(0);
 
@@ -1237,6 +1228,7 @@
       map.is_identical_to(
           isolate()->sloppy_function_with_readonly_prototype_map()) ||
       map.is_identical_to(isolate()->strict_function_map()) ||
+      map.is_identical_to(isolate()->strict_function_without_prototype_map()) ||
       // TODO(titzer): wasm_function_map() could be undefined here. ugly.
       (*map == context->get(Context::WASM_FUNCTION_MAP_INDEX)) ||
       map.is_identical_to(isolate()->proxy_function_map()));
@@ -1411,8 +1403,10 @@
   int obj_size = Code::SizeFor(body_size);
 
   Handle<Code> code = NewCodeRaw(obj_size, immovable);
-  DCHECK(isolate()->code_range() == NULL || !isolate()->code_range()->valid() ||
-         isolate()->code_range()->contains(code->address()) ||
+  DCHECK(isolate()->heap()->memory_allocator()->code_range() == NULL ||
+         !isolate()->heap()->memory_allocator()->code_range()->valid() ||
+         isolate()->heap()->memory_allocator()->code_range()->contains(
+             code->address()) ||
          obj_size <= isolate()->heap()->code_space()->AreaSize());
 
   // The code object has not been fully initialized yet.  We rely on the
@@ -1668,7 +1662,7 @@
 
 Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
     Handle<JSFunction> function) {
-  DCHECK(function->shared()->is_generator());
+  DCHECK(function->shared()->is_resumable());
   JSFunction::EnsureHasInitialMap(function);
   Handle<Map> map(function->initial_map());
   DCHECK_EQ(JS_GENERATOR_OBJECT_TYPE, map->instance_type());
@@ -1968,13 +1962,9 @@
   }
 
   // Setup the map for the JSBoundFunction instance.
-  Handle<Map> map = handle(
-      target_function->IsConstructor()
-          ? isolate()->native_context()->bound_function_with_constructor_map()
-          : isolate()
-                ->native_context()
-                ->bound_function_without_constructor_map(),
-      isolate());
+  Handle<Map> map = target_function->IsConstructor()
+                        ? isolate()->bound_function_with_constructor_map()
+                        : isolate()->bound_function_without_constructor_map();
   if (map->prototype() != *prototype) {
     map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
   }
@@ -1986,8 +1976,6 @@
   result->set_bound_target_function(*target_function);
   result->set_bound_this(*bound_this);
   result->set_bound_arguments(*bound_arguments);
-  result->set_length(Smi::FromInt(0));
-  result->set_name(*undefined_value(), SKIP_WRITE_BARRIER);
   return result;
 }
 
@@ -2080,6 +2068,11 @@
     shared->set_instance_class_name(isolate()->heap()->Generator_string());
     shared->DisableOptimization(kGenerator);
   }
+  if (IsAsyncFunction(kind)) {
+    // TODO(caitp): Enable optimization of async functions when it is enabled
+    // for generator functions.
+    shared->DisableOptimization(kGenerator);
+  }
   return shared;
 }
 
@@ -2247,14 +2240,19 @@
       Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
   debug_info->set_shared(*shared);
   if (shared->HasBytecodeArray()) {
-    // Create a copy for debugging.
-    Handle<BytecodeArray> original(shared->bytecode_array(), isolate());
-    Handle<BytecodeArray> copy = CopyBytecodeArray(original);
-    debug_info->set_abstract_code(AbstractCode::cast(*copy));
+    // We need to create a copy, but delay it, since creating the copy may
+    // trigger heap verification.
+    debug_info->set_abstract_code(AbstractCode::cast(shared->bytecode_array()));
   } else {
     debug_info->set_abstract_code(AbstractCode::cast(shared->code()));
   }
   debug_info->set_break_points(*break_points);
+  if (shared->HasBytecodeArray()) {
+    // Create a copy for debugging.
+    Handle<BytecodeArray> original(shared->bytecode_array());
+    Handle<BytecodeArray> copy = CopyBytecodeArray(original);
+    debug_info->set_abstract_code(AbstractCode::cast(*copy));
+  }
 
   // Link debug info to function.
   shared->set_debug_info(*debug_info);
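
The reordered DebugInfo setup above is a two-phase initialization: the field
first points at the existing bytecode array so the object stays valid for
heap verification, and only afterwards is the debug copy allocated and
swapped in. A self-contained sketch of the same pattern (types here are
stand-ins, not V8's):

#include <memory>
#include <vector>

struct Bytecode {
  std::vector<int> code;
};

struct DebugInfo {
  const Bytecode* abstract_code = nullptr;  // must never be left dangling
};

int main() {
  Bytecode original{{1, 2, 3}};
  DebugInfo info;
  info.abstract_code = &original;  // phase 1: point at the original
  auto copy = std::make_unique<Bytecode>(original);  // allocation happens here
  info.abstract_code = copy.get();  // phase 2: swap in the debug copy
  return info.abstract_code->code.size() == 3 ? 0 : 1;
}
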
diff --git a/src/factory.h b/src/factory.h
index 2fa2901..51ba09d 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -270,8 +270,6 @@
   // the old generation).
   Handle<Struct> NewStruct(InstanceType type);
 
-  Handle<CodeCache> NewCodeCache();
-
   Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
       int aliased_context_slot);
 
@@ -562,6 +560,11 @@
     return NewRangeError(MessageTemplate::kInvalidStringLength);
   }
 
+  Handle<Object> NewURIError() {
+    return NewError(isolate()->uri_error_function(),
+                    MessageTemplate::kURIMalformed);
+  }
+
   Handle<Object> NewError(Handle<JSFunction> constructor,
                           MessageTemplate::Template template_index,
                           Handle<Object> arg0 = Handle<Object>(),
diff --git a/src/fast-accessor-assembler.cc b/src/fast-accessor-assembler.cc
index cd2910c..1fbe1a8 100644
--- a/src/fast-accessor-assembler.cc
+++ b/src/fast-accessor-assembler.cc
@@ -5,12 +5,12 @@
 #include "src/fast-accessor-assembler.h"
 
 #include "src/base/logging.h"
+#include "src/code-stub-assembler.h"
 #include "src/code-stubs.h"  // For CallApiCallbackStub.
-#include "src/compiler/code-stub-assembler.h"
 #include "src/handles-inl.h"
 #include "src/objects.h"  // For FAA::LoadInternalField impl.
 
-using v8::internal::compiler::CodeStubAssembler;
+using v8::internal::CodeStubAssembler;
 using v8::internal::compiler::Node;
 
 namespace v8 {
@@ -56,12 +56,13 @@
   CodeStubAssembler::Variable result(assembler_.get(),
                                      MachineRepresentation::kTagged);
   CodeStubAssembler::Label is_jsobject(assembler_.get());
+  CodeStubAssembler::Label maybe_api_object(assembler_.get());
   CodeStubAssembler::Label is_not_jsobject(assembler_.get());
   CodeStubAssembler::Label merge(assembler_.get(), &result);
   assembler_->Branch(
       assembler_->WordEqual(
           instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
-      &is_jsobject, &is_not_jsobject);
+      &is_jsobject, &maybe_api_object);
 
   // JSObject? Then load the internal field field_no.
   assembler_->Bind(&is_jsobject);
@@ -71,6 +72,12 @@
   result.Bind(internal_field);
   assembler_->Goto(&merge);
 
+  assembler_->Bind(&maybe_api_object);
+  assembler_->Branch(
+      assembler_->WordEqual(instance_type, assembler_->IntPtrConstant(
+                                               Internals::kJSApiObjectType)),
+      &is_jsobject, &is_not_jsobject);
+
   // No JSObject? Return undefined.
   // TODO(vogelheim): Check whether this is the appropriate action, or whether
   //                  the method should take a label instead.
diff --git a/src/fast-accessor-assembler.h b/src/fast-accessor-assembler.h
index 57e72e8..346592e 100644
--- a/src/fast-accessor-assembler.h
+++ b/src/fast-accessor-assembler.h
@@ -14,7 +14,7 @@
 #include "src/handles.h"
 
 // For CodeStubAssembler::Label. (We cannot forward-declare inner classes.)
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -73,9 +73,9 @@
 
  private:
   ValueId FromRaw(compiler::Node* node);
-  LabelId FromRaw(compiler::CodeStubAssembler::Label* label);
+  LabelId FromRaw(CodeStubAssembler::Label* label);
   compiler::Node* FromId(ValueId value) const;
-  compiler::CodeStubAssembler::Label* FromId(LabelId value) const;
+  CodeStubAssembler::Label* FromId(LabelId value) const;
 
   void Clear();
   Zone* zone() { return &zone_; }
@@ -83,13 +83,13 @@
 
   Zone zone_;
   Isolate* isolate_;
-  base::SmartPointer<compiler::CodeStubAssembler> assembler_;
+  base::SmartPointer<CodeStubAssembler> assembler_;
 
   // To prevent exposing the RMA internals to the outside world, we'll map
   // Node + Label pointers to integers wrapped in ValueId and LabelId instances.
   // These vectors maintain this mapping.
   std::vector<compiler::Node*> nodes_;
-  std::vector<compiler::CodeStubAssembler::Label*> labels_;
+  std::vector<CodeStubAssembler::Label*> labels_;
 
   // Remember the current state for easy error checking. (We prefer to be
   // strict, as this class will be exposed in the API.)
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index c1fffec..841d326 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -184,48 +184,63 @@
 DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
 DEFINE_IMPLICATION(es_staging, harmony)
 
-DEFINE_BOOL(promise_extra, true, "additional V8 Promise functions")
-// Removing extra Promise functions is staged
-DEFINE_NEG_IMPLICATION(harmony, promise_extra)
+DEFINE_BOOL(promise_extra, false, "additional V8 Promise functions")
+// Removing extra Promise functions is shipped
+DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, promise_extra, true)
+
+DEFINE_BOOL(intl_extra, true, "additional V8 Intl functions")
+// Removing extra Intl functions is shipped
+DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, intl_extra, true)
 
 // Activate on ClusterFuzz.
 DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
 DEFINE_IMPLICATION(es_staging, move_object_start)
 
 // Features that are still work in progress (behind individual flags).
+#ifdef V8_I18N_SUPPORT
 #define HARMONY_INPROGRESS(V)                                           \
-  V(harmony_object_observe, "harmony Object.observe")                   \
+  V(harmony_array_prototype_values, "harmony Array.prototype.values")   \
   V(harmony_function_sent, "harmony function.sent")                     \
   V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
   V(harmony_simd, "harmony simd")                                       \
   V(harmony_do_expressions, "harmony do-expressions")                   \
   V(harmony_regexp_property, "harmony unicode regexp property classes") \
-  V(harmony_string_padding, "harmony String-padding methods")
+  V(icu_case_mapping, "case mapping with ICU rather than Unibrow")      \
+  V(harmony_async_await, "harmony async-await")
+#else
+#define HARMONY_INPROGRESS(V)                                           \
+  V(harmony_array_prototype_values, "harmony Array.prototype.values")   \
+  V(harmony_function_sent, "harmony function.sent")                     \
+  V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
+  V(harmony_simd, "harmony simd")                                       \
+  V(harmony_do_expressions, "harmony do-expressions")                   \
+  V(harmony_regexp_property, "harmony unicode regexp property classes") \
+  V(harmony_async_await, "harmony async-await")
+#endif
 
 // Features that are complete (but still behind --harmony/es-staging flag).
 #define HARMONY_STAGED(V)                                                    \
+  V(harmony_for_in, "harmony for-in syntax")                                 \
   V(harmony_regexp_lookbehind, "harmony regexp lookbehind")                  \
   V(harmony_tailcalls, "harmony tail calls")                                 \
+  V(harmony_explicit_tailcalls, "harmony explicit tail calls")               \
   V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
   V(harmony_object_own_property_descriptors,                                 \
     "harmony Object.getOwnPropertyDescriptors()")                            \
-  V(harmony_exponentiation_operator, "harmony exponentiation operator `**`")
+  V(harmony_string_padding, "harmony String-padding methods")
 
 // Features that are shipping (turned on by default, but internal flag remains).
 #define HARMONY_SHIPPING(V)                                           \
-  V(harmony_array_prototype_values, "harmony Array.prototype.values") \
   V(harmony_function_name, "harmony Function name inference")         \
   V(harmony_instanceof, "harmony instanceof support")                 \
   V(harmony_iterator_close, "harmony iterator finalization")          \
   V(harmony_unicode_regexps, "harmony unicode regexps")               \
   V(harmony_regexp_exec, "harmony RegExp exec override behavior")     \
-  V(harmony_sloppy, "harmony features in sloppy mode")                \
-  V(harmony_sloppy_let, "harmony let in sloppy mode")                 \
-  V(harmony_sloppy_function, "harmony sloppy function block scoping") \
   V(harmony_regexp_subclass, "harmony regexp subclassing")            \
   V(harmony_restrictive_declarations,                                 \
     "harmony limitations on sloppy mode function declarations")       \
-  V(harmony_species, "harmony Symbol.species")
+  V(harmony_species, "harmony Symbol.species")                        \
+  V(harmony_exponentiation_operator, "harmony exponentiation operator `**`")
 
 // Once a shipping feature has proved stable in the wild, it will be dropped
 // from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -250,16 +265,14 @@
 HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
 #undef FLAG_SHIPPING_FEATURES
 
-
-// Feature dependencies.
-DEFINE_IMPLICATION(harmony_sloppy_let, harmony_sloppy)
-DEFINE_IMPLICATION(harmony_sloppy_function, harmony_sloppy)
-
 // Flags for experimental implementation features.
 DEFINE_BOOL(compiled_keyed_generic_loads, false,
             "use optimizing compiler to generate keyed generic load stubs")
 DEFINE_BOOL(allocation_site_pretenuring, true,
             "pretenure with allocation sites")
+DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
+DEFINE_INT(page_promotion_threshold, 70,
+           "min percentage of live bytes on a page to enable fast evacuation")
 DEFINE_BOOL(trace_pretenuring, false,
             "trace pretenuring decisions of HAllocate instructions")
 DEFINE_BOOL(trace_pretenuring_statistics, false,
@@ -268,7 +281,6 @@
 DEFINE_BOOL(track_double_fields, true, "track fields with double values")
 DEFINE_BOOL(track_heap_object_fields, true, "track fields with heap values")
 DEFINE_BOOL(track_computed_fields, true, "track computed boilerplate fields")
-DEFINE_BOOL(harmony_instanceof_opt, true, "optimize ES6 instanceof support")
 DEFINE_IMPLICATION(track_double_fields, track_fields)
 DEFINE_IMPLICATION(track_heap_object_fields, track_fields)
 DEFINE_IMPLICATION(track_computed_fields, track_fields)
@@ -291,13 +303,22 @@
 // Flags for Ignition.
 DEFINE_BOOL(ignition, false, "use ignition interpreter")
 DEFINE_BOOL(ignition_eager, true, "eagerly compile and parse with ignition")
+DEFINE_BOOL(ignition_generators, false,
+            "enable experimental ignition support for generators")
 DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
+DEFINE_BOOL(ignition_peephole, true, "use ignition peephole optimizer")
 DEFINE_BOOL(print_bytecode, false,
             "print bytecode generated by ignition interpreter")
 DEFINE_BOOL(trace_ignition, false,
             "trace the bytecodes executed by the ignition interpreter")
 DEFINE_BOOL(trace_ignition_codegen, false,
             "trace the codegen of ignition interpreter bytecode handlers")
+DEFINE_BOOL(trace_ignition_dispatches, false,
+            "traces the dispatches to bytecode handlers by the ignition "
+            "interpreter")
+DEFINE_STRING(trace_ignition_dispatches_output_file, nullptr,
+              "the file to which the bytecode handler dispatch table is "
+              "written (by default, the table is not written to a file)")
 
 // Flags for Crankshaft.
 DEFINE_BOOL(crankshaft, true, "use crankshaft")
@@ -404,6 +425,7 @@
 DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
 DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
 DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
+DEFINE_BOOL(turbo_from_bytecode, false, "enable building graphs from bytecode")
 DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
 DEFINE_BOOL(turbo_sp_frame_access, false,
             "use stack pointer-relative access to frame wherever possible")
@@ -427,7 +449,6 @@
 DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
 DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
-DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
 DEFINE_BOOL(turbo_source_positions, false,
             "track source code positions when building TurboFan IR")
 DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
@@ -443,7 +464,6 @@
             "verify register allocation in TurboFan")
 DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
 DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
-DEFINE_BOOL(turbo_osr, true, "enable OSR in TurboFan")
 DEFINE_BOOL(turbo_stress_loop_peeling, false,
             "stress loop peeling optimization")
 DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
@@ -458,6 +478,8 @@
 
 // Flags for native WebAssembly.
 DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
+DEFINE_INT(wasm_num_compilation_tasks, 0,
+           "number of parallel compilation tasks for wasm")
 DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
 DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
 DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
@@ -471,10 +493,13 @@
 DEFINE_BOOL(wasm_loop_assignment_analysis, true,
             "perform loop assignment analysis for WASM")
 
+DEFINE_BOOL(validate_asm, false, "validate asm.js modules before compiling")
 DEFINE_BOOL(enable_simd_asmjs, false, "enable SIMD.js in asm.js stdlib")
 
 DEFINE_BOOL(dump_wasm_module, false, "dump WASM module bytes")
 DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
+DEFINE_BOOL(print_wasm_code_size, false,
+            "print the generated code size for each wasm module")
 
 DEFINE_INT(typed_array_max_size_in_heap, 64,
            "threshold for in-heap typed array")
@@ -569,6 +594,8 @@
 DEFINE_BOOL(lazy, true, "use lazy compilation")
 DEFINE_BOOL(trace_opt, false, "trace lazy optimization")
 DEFINE_BOOL(trace_opt_stats, false, "trace lazy optimization statistics")
+DEFINE_BOOL(trace_file_names, false,
+            "include file names in trace-opt/trace-deopt output")
 DEFINE_BOOL(opt, true, "use adaptive optimizations")
 DEFINE_BOOL(always_opt, false, "always try to optimize functions")
 DEFINE_BOOL(always_osr, false, "always try to OSR functions")
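
The implication macros used above (DEFINE_IMPLICATION,
DEFINE_NEG_VALUE_IMPLICATION) force dependent flags to a value once the
premise flag is known, in a pass that runs after flag parsing. Reading
DEFINE_NEG_VALUE_IMPLICATION(a, b, v) as "if a is off, set b to v" — an
assumed reading based on the comments above, not a quote of V8's macro
expansion — a minimal model looks like:

#include <cstdio>

struct Flags {
  bool harmony_shipping = true;
  bool promise_extra = false;  // new default in this CL
};

void ComputeImplications(Flags* f) {
  // DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, promise_extra, true):
  // with --no-harmony-shipping, restore the extra Promise functions.
  if (!f->harmony_shipping) f->promise_extra = true;
}

int main() {
  Flags f;
  f.harmony_shipping = false;  // as if --no-harmony-shipping was passed
  ComputeImplications(&f);
  std::printf("promise_extra=%d\n", f.promise_extra);  // prints 1
}
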
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 5ecbd45..998be23 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -199,11 +199,6 @@
 }
 
 
-inline Object* JavaScriptFrame::receiver() const {
-  return GetParameter(-1);
-}
-
-
 inline void JavaScriptFrame::set_receiver(Object* value) {
   Memory::Object_at(GetParameterSlot(-1)) = value;
 }
@@ -214,11 +209,6 @@
 }
 
 
-inline JSFunction* JavaScriptFrame::function() const {
-  return JSFunction::cast(function_slot_object());
-}
-
-
 inline Object* JavaScriptFrame::function_slot_object() const {
   const int offset = JavaScriptFrameConstants::kFunctionOffset;
   return Memory::Object_at(fp() + offset);
@@ -288,6 +278,28 @@
   return static_cast<JavaScriptFrame*>(frame);
 }
 
+inline StandardFrame* StackTraceFrameIterator::frame() const {
+  StackFrame* frame = iterator_.frame();
+  DCHECK(frame->is_java_script() || frame->is_arguments_adaptor() ||
+         frame->is_wasm());
+  return static_cast<StandardFrame*>(frame);
+}
+
+bool StackTraceFrameIterator::is_javascript() const {
+  return frame()->is_java_script();
+}
+
+bool StackTraceFrameIterator::is_wasm() const { return frame()->is_wasm(); }
+
+JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
+  DCHECK(is_javascript());
+  return static_cast<JavaScriptFrame*>(frame());
+}
+
+WasmFrame* StackTraceFrameIterator::wasm_frame() const {
+  DCHECK(is_wasm());
+  return static_cast<WasmFrame*>(frame());
+}
 
 inline StackFrame* SafeStackFrameIterator::frame() const {
   DCHECK(!done());
diff --git a/src/frames.cc b/src/frames.cc
index 0e57429..a8fe6bb 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -16,6 +16,7 @@
 #include "src/safepoint-table.h"
 #include "src/string-stream.h"
 #include "src/vm-state-inl.h"
+#include "src/wasm/wasm-module.h"
 
 namespace v8 {
 namespace internal {
@@ -104,17 +105,15 @@
   StackFrame::Type type = ExitFrame::GetStateForFramePointer(
       Isolate::c_entry_fp(top), &state);
   handler_ = StackHandler::FromAddress(Isolate::handler(top));
-  if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
 }
 
 
 StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
                                              StackFrame::State* state) {
-  if (type == StackFrame::NONE) return NULL;
   StackFrame* result = SingletonFor(type);
-  DCHECK(result != NULL);
-  result->state_ = *state;
+  DCHECK((!result) == (type == StackFrame::NONE));
+  if (result) result->state_ = *state;
   return result;
 }
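
The refactored SingletonFor above folds the old "is this type mapped?"
pre-check into the lookup itself: NONE now yields nullptr, and the invariant
is asserted in one place. A compact stand-alone sketch of the shape
(FrameType and Frame are stand-ins):

#include <cassert>

enum class FrameType { NONE, ENTRY, EXIT };

struct Frame {
  FrameType type;
};

// Mirrors DCHECK((!result) == (type == StackFrame::NONE)) from the hunk.
Frame* SingletonFor(FrameType type) {
  static Frame entry_frame{FrameType::ENTRY};
  static Frame exit_frame{FrameType::EXIT};
  switch (type) {
    case FrameType::ENTRY:
      return &entry_frame;
    case FrameType::EXIT:
      return &exit_frame;
    case FrameType::NONE:
      return nullptr;
  }
  return nullptr;
}

int main() {
  Frame* frame = SingletonFor(FrameType::NONE);
  assert(frame == nullptr);  // callers test the result instead of pre-filtering
  return frame ? 1 : 0;
}
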
 
@@ -162,28 +161,29 @@
 
 // -------------------------------------------------------------------------
 
-
 StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
-    : JavaScriptFrameIterator(isolate) {
-  if (!done() && !IsValidFrame()) Advance();
+    : iterator_(isolate) {
+  if (!done() && !IsValidFrame(iterator_.frame())) Advance();
 }
 
 
 void StackTraceFrameIterator::Advance() {
-  while (true) {
-    JavaScriptFrameIterator::Advance();
-    if (done()) return;
-    if (IsValidFrame()) return;
-  }
+  do {
+    iterator_.Advance();
+  } while (!done() && !IsValidFrame(iterator_.frame()));
 }
 
-
-bool StackTraceFrameIterator::IsValidFrame() {
-    if (!frame()->function()->IsJSFunction()) return false;
-    Object* script = frame()->function()->shared()->script();
+bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
+  if (frame->is_java_script()) {
+    JavaScriptFrame* jsFrame = static_cast<JavaScriptFrame*>(frame);
+    if (!jsFrame->function()->IsJSFunction()) return false;
+    Object* script = jsFrame->function()->shared()->script();
     // Don't show functions from native scripts to the user.
     return (script->IsScript() &&
             Script::TYPE_NATIVE != Script::cast(script)->type());
+  }
+  // Apart from JavaScript frames, only WASM frames are valid.
+  return frame->is_wasm();
 }
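
The rewritten StackTraceFrameIterator above is a classic filtering iterator:
wrap a raw iterator and advance past elements until the predicate holds, both
in the constructor and in Advance(). A self-contained sketch of that shape
(Kind and the vector-backed "stack" are stand-ins for real frame types):

#include <cstddef>
#include <cstdio>
#include <vector>

enum class Kind { JavaScript, Wasm, Internal };

class FilteringIterator {
 public:
  explicit FilteringIterator(const std::vector<Kind>& frames)
      : frames_(frames) {
    if (!done() && !IsValid()) Advance();  // skip an invalid first frame
  }
  bool done() const { return pos_ >= frames_.size(); }
  void Advance() {
    do {
      ++pos_;
    } while (!done() && !IsValid());
  }
  Kind current() const { return frames_[pos_]; }

 private:
  bool IsValid() const {
    // Apart from JavaScript, only WASM frames are valid.
    return frames_[pos_] == Kind::JavaScript || frames_[pos_] == Kind::Wasm;
  }
  const std::vector<Kind>& frames_;
  size_t pos_ = 0;
};

int main() {
  std::vector<Kind> stack = {Kind::Internal, Kind::Wasm, Kind::JavaScript};
  for (FilteringIterator it(stack); !it.done(); it.Advance())
    std::printf("%d\n", static_cast<int>(it.current()));  // prints 1 then 0
}
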
 
 
@@ -230,10 +230,8 @@
   } else {
     return;
   }
-  if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
-  DCHECK(frame_);
-  Advance();
+  if (frame_) Advance();
 }
 
 
@@ -261,12 +259,8 @@
   // Advance to the previous frame.
   StackFrame::State state;
   StackFrame::Type type = frame_->GetCallerState(&state);
-  if (SingletonFor(type) == NULL) {
-    frame_ = NULL;
-    return;
-  }
   frame_ = SingletonFor(type, &state);
-  DCHECK(frame_);
+  if (!frame_) return;
 
   // Check that we have actually moved to the previous frame in the stack.
   if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
@@ -493,10 +487,10 @@
     case INTERNAL:
     case CONSTRUCT:
     case ARGUMENTS_ADAPTOR:
-      return candidate;
-    case JS_TO_WASM:
     case WASM_TO_JS:
     case WASM:
+      return candidate;
+    case JS_TO_WASM:
     case JAVA_SCRIPT:
     case OPTIMIZED:
     case INTERPRETED:
@@ -624,7 +618,6 @@
   state->constant_pool_address = NULL;
 }
 
-
 Address StandardFrame::GetExpressionAddress(int n) const {
   const int offset = StandardFrameConstants::kExpressionsOffset;
   return fp() + offset - n * kPointerSize;
@@ -711,7 +704,8 @@
       (frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
 
   Object** frame_header_base = &Memory::Object_at(fp() - frame_header_size);
-  Object** frame_header_limit = &Memory::Object_at(fp());
+  Object** frame_header_limit =
+      &Memory::Object_at(fp() - StandardFrameConstants::kCPSlotSize);
   Object** parameters_base = &Memory::Object_at(sp());
   Object** parameters_limit = frame_header_base - slot_space / kPointerSize;
 
@@ -854,17 +848,23 @@
   functions->Add(function());
 }
 
-
-void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
+void JavaScriptFrame::Summarize(List<FrameSummary>* functions,
+                                FrameSummary::Mode mode) const {
   DCHECK(functions->length() == 0);
   Code* code = LookupCode();
   int offset = static_cast<int>(pc() - code->instruction_start());
   AbstractCode* abstract_code = AbstractCode::cast(code);
   FrameSummary summary(receiver(), function(), abstract_code, offset,
-                       IsConstructor());
+                       IsConstructor(), mode);
   functions->Add(summary);
 }
 
+JSFunction* JavaScriptFrame::function() const {
+  return JSFunction::cast(function_slot_object());
+}
+
+Object* JavaScriptFrame::receiver() const { return GetParameter(-1); }
+
 int JavaScriptFrame::LookupExceptionHandlerInTable(
     int* stack_depth, HandlerTable::CatchPrediction* prediction) {
   Code* code = LookupCode();
@@ -944,16 +944,6 @@
   }
 }
 
-
-void JavaScriptFrame::RestoreOperandStack(FixedArray* store) {
-  int operands_count = store->length();
-  DCHECK_LE(operands_count, ComputeOperandsCount());
-  for (int i = 0; i < operands_count; i++) {
-    DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
-    Memory::Object_at(GetOperandSlot(i)) = store->get(i);
-  }
-}
-
 namespace {
 
 bool CannotDeoptFromAsmCode(Code* code, JSFunction* function) {
@@ -965,7 +955,7 @@
 
 FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
                            AbstractCode* abstract_code, int code_offset,
-                           bool is_constructor)
+                           bool is_constructor, Mode mode)
     : receiver_(receiver, function->GetIsolate()),
       function_(function),
       abstract_code_(abstract_code),
@@ -973,7 +963,14 @@
       is_constructor_(is_constructor) {
   DCHECK(abstract_code->IsBytecodeArray() ||
          Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION ||
-         CannotDeoptFromAsmCode(Code::cast(abstract_code), function));
+         CannotDeoptFromAsmCode(Code::cast(abstract_code), function) ||
+         mode == kApproximateSummary);
+}
+
+FrameSummary FrameSummary::GetFirst(JavaScriptFrame* frame) {
+  List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+  frame->Summarize(&frames);
+  return frames.first();
 }
 
 void FrameSummary::Print() {
@@ -987,8 +984,12 @@
     Code* code = abstract_code_->GetCode();
     if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
     if (code->kind() == Code::OPTIMIZED_FUNCTION) {
-      DCHECK(CannotDeoptFromAsmCode(code, *function()));
-      PrintF(" ASM ");
+      if (function()->shared()->asm_function()) {
+        DCHECK(CannotDeoptFromAsmCode(code, *function()));
+        PrintF(" ASM ");
+      } else {
+        PrintF(" OPT (approximate)");
+      }
     }
   } else {
     PrintF(" BYTECODE ");
@@ -996,8 +997,8 @@
   PrintF("\npc: %d\n", code_offset_);
 }
 
-
-void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
+void OptimizedFrame::Summarize(List<FrameSummary>* frames,
+                               FrameSummary::Mode mode) const {
   DCHECK(frames->length() == 0);
   DCHECK(is_optimized());
 
@@ -1012,6 +1013,13 @@
   DisallowHeapAllocation no_gc;
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+  if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
+    DCHECK(data == nullptr);
+    if (mode == FrameSummary::kApproximateSummary) {
+      return JavaScriptFrame::Summarize(frames, mode);
+    }
+    FATAL("Missing deoptimization information for OptimizedFrame::Summarize.");
+  }
   FixedArray* const literal_array = data->LiteralArray();
 
   TranslationIterator it(data->TranslationByteArray(),
@@ -1134,9 +1142,10 @@
 
   SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
   *deopt_index = safepoint_entry.deoptimization_index();
-  DCHECK(*deopt_index != Safepoint::kNoDeoptimizationIndex);
-
-  return DeoptimizationInputData::cast(code->deoptimization_data());
+  if (*deopt_index != Safepoint::kNoDeoptimizationIndex) {
+    return DeoptimizationInputData::cast(code->deoptimization_data());
+  }
+  return nullptr;
 }
 
 
@@ -1155,6 +1164,8 @@
   DisallowHeapAllocation no_gc;
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+  DCHECK_NOT_NULL(data);
+  DCHECK_NE(Safepoint::kNoDeoptimizationIndex, deopt_index);
   FixedArray* const literal_array = data->LiteralArray();
 
   TranslationIterator it(data->TranslationByteArray(),
@@ -1228,15 +1239,15 @@
   SetExpression(index, Smi::FromInt(raw_offset));
 }
 
-Object* InterpretedFrame::GetBytecodeArray() const {
+BytecodeArray* InterpretedFrame::GetBytecodeArray() const {
   const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
   DCHECK_EQ(
       InterpreterFrameConstants::kBytecodeArrayFromFp,
       InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
-  return GetExpression(index);
+  return BytecodeArray::cast(GetExpression(index));
 }
 
-void InterpretedFrame::PatchBytecodeArray(Object* bytecode_array) {
+void InterpretedFrame::PatchBytecodeArray(BytecodeArray* bytecode_array) {
   const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
   DCHECK_EQ(
       InterpreterFrameConstants::kBytecodeArrayFromFp,
@@ -1244,15 +1255,25 @@
   SetExpression(index, bytecode_array);
 }
 
-Object* InterpretedFrame::GetInterpreterRegister(int register_index) const {
+Object* InterpretedFrame::ReadInterpreterRegister(int register_index) const {
   const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
   DCHECK_EQ(
-      InterpreterFrameConstants::kRegisterFilePointerFromFp,
+      InterpreterFrameConstants::kRegisterFileFromFp,
       InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
   return GetExpression(index + register_index);
 }
 
-void InterpretedFrame::Summarize(List<FrameSummary>* functions) {
+void InterpretedFrame::WriteInterpreterRegister(int register_index,
+                                                Object* value) {
+  const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
+  DCHECK_EQ(
+      InterpreterFrameConstants::kRegisterFileFromFp,
+      InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+  return SetExpression(index + register_index, value);
+}
+
+void InterpretedFrame::Summarize(List<FrameSummary>* functions,
+                                 FrameSummary::Mode mode) const {
   DCHECK(functions->length() == 0);
   AbstractCode* abstract_code =
       AbstractCode::cast(function()->shared()->bytecode_array());
@@ -1315,6 +1336,32 @@
   return fp() + ExitFrameConstants::kCallerSPOffset;
 }
 
+Object* WasmFrame::wasm_obj() {
+  FixedArray* deopt_data = LookupCode()->deoptimization_data();
+  DCHECK(deopt_data->length() == 2);
+  return deopt_data->get(0);
+}
+
+uint32_t WasmFrame::function_index() {
+  FixedArray* deopt_data = LookupCode()->deoptimization_data();
+  DCHECK(deopt_data->length() == 2);
+  Object* func_index_obj = deopt_data->get(1);
+  if (func_index_obj->IsUndefined()) return static_cast<uint32_t>(-1);
+  if (func_index_obj->IsSmi()) return Smi::cast(func_index_obj)->value();
+  DCHECK(func_index_obj->IsHeapNumber());
+  uint32_t val = static_cast<uint32_t>(-1);
+  func_index_obj->ToUint32(&val);
+  DCHECK(val != static_cast<uint32_t>(-1));
+  return val;
+}
+
+Object* WasmFrame::function_name() {
+  Object* wasm_object = wasm_obj();
+  if (wasm_object->IsUndefined()) return wasm_object;
+  Handle<JSObject> wasm = handle(JSObject::cast(wasm_object));
+  return *wasm::GetWasmFunctionName(wasm, function_index());
+}
+
 namespace {
 
 
@@ -1364,14 +1411,20 @@
       int offset = static_cast<int>(pc - code->instruction_start());
       int source_pos = code->SourcePosition(offset);
       int line = script->GetLineNumber(source_pos) + 1;
-      accumulator->Add(":%d", line);
+      accumulator->Add(":%d] [pc=%p]", line, pc);
+    } else if (is_interpreted()) {
+      const InterpretedFrame* iframe =
+          reinterpret_cast<const InterpretedFrame*>(this);
+      BytecodeArray* bytecodes = iframe->GetBytecodeArray();
+      int offset = iframe->GetBytecodeOffset();
+      int source_pos = bytecodes->SourcePosition(offset);
+      int line = script->GetLineNumber(source_pos) + 1;
+      accumulator->Add(":%d] [bytecode=%p offset=%d]", line, bytecodes, offset);
     } else {
       int function_start_pos = shared->start_position();
       int line = script->GetLineNumber(function_start_pos) + 1;
-      accumulator->Add(":~%d", line);
+      accumulator->Add(":~%d] [pc=%p]", line, pc);
     }
-
-    accumulator->Add("] [pc=%p] ", pc);
   }
 
   accumulator->Add("(this=%o", receiver);
@@ -1632,7 +1685,8 @@
   Page* page = Page::FromAddress(inner_pointer);
 
   DCHECK_EQ(page->owner(), heap->code_space());
-  heap->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(page);
+  heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
+      page);
 
   Address addr = page->skip_list()->StartFor(inner_pointer);
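
The WasmFrame accessors added earlier in this file decode a two-element
deoptimization array: element 0 is the wasm object and element 1 the function
index, which may be a Smi, a boxed HeapNumber, or undefined. A stand-alone
sketch of that tolerant decode, with std::variant standing in for tagged heap
values:

#include <cstdint>
#include <variant>

struct Undefined {};
using Tagged = std::variant<Undefined, int32_t, double>;

// Mirrors WasmFrame::function_index(): -1 signals "no index available".
uint32_t DecodeFunctionIndex(const Tagged& value) {
  if (std::holds_alternative<Undefined>(value)) {
    return static_cast<uint32_t>(-1);
  }
  if (const int32_t* smi = std::get_if<int32_t>(&value)) {
    return static_cast<uint32_t>(*smi);  // the common small-integer case
  }
  return static_cast<uint32_t>(std::get<double>(value));  // boxed number
}

int main() {
  return DecodeFunctionIndex(Tagged{int32_t{7}}) == 7u ? 0 : 1;
}
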
 
diff --git a/src/frames.h b/src/frames.h
index f6806d7..4163d6f 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -324,31 +324,22 @@
       StandardFrameConstants::kFixedFrameSizeFromFp + 3 * kPointerSize;
 
   // FP-relative.
+  static const int kLastParamFromFp = StandardFrameConstants::kCallerSPOffset;
   static const int kNewTargetFromFp =
       -StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kPointerSize;
   static const int kBytecodeArrayFromFp =
       -StandardFrameConstants::kFixedFrameSizeFromFp - 2 * kPointerSize;
   static const int kBytecodeOffsetFromFp =
       -StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
-  static const int kRegisterFilePointerFromFp =
+  static const int kRegisterFileFromFp =
       -StandardFrameConstants::kFixedFrameSizeFromFp - 4 * kPointerSize;
 
-  static const int kExpressionsOffset = kRegisterFilePointerFromFp;
+  static const int kExpressionsOffset = kRegisterFileFromFp;
 
   // Expression index for {StandardFrame::GetExpressionAddress}.
   static const int kBytecodeArrayExpressionIndex = -2;
   static const int kBytecodeOffsetExpressionIndex = -1;
   static const int kRegisterFileExpressionIndex = 0;
-
-  // Register file pointer relative.
-  static const int kLastParamFromRegisterPointer =
-      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-
-  static const int kBytecodeOffsetFromRegisterPointer = 1 * kPointerSize;
-  static const int kBytecodeArrayFromRegisterPointer = 2 * kPointerSize;
-  static const int kNewTargetFromRegisterPointer = 3 * kPointerSize;
-  static const int kFunctionFromRegisterPointer = 4 * kPointerSize;
-  static const int kContextFromRegisterPointer = 5 * kPointerSize;
 };
 
 inline static int FPOffsetToFrameSlot(int frame_offset) {
@@ -640,8 +631,40 @@
   friend class StackFrameIteratorBase;
 };
 
+class JavaScriptFrame;
 
-class StandardFrame: public StackFrame {
+class FrameSummary BASE_EMBEDDED {
+ public:
+  // Mode for JavaScriptFrame::Summarize. Exact summary is required to produce
+  // an exact stack trace. It will trigger an assertion failure if that is not
+  // possible, e.g., because of missing deoptimization information. The
+  // approximate mode should produce a summary even without deoptimization
+  // information, but it might miss frames.
+  enum Mode { kExactSummary, kApproximateSummary };
+
+  FrameSummary(Object* receiver, JSFunction* function,
+               AbstractCode* abstract_code, int code_offset,
+               bool is_constructor, Mode mode = kExactSummary);
+
+  static FrameSummary GetFirst(JavaScriptFrame* frame);
+
+  Handle<Object> receiver() { return receiver_; }
+  Handle<JSFunction> function() { return function_; }
+  Handle<AbstractCode> abstract_code() { return abstract_code_; }
+  int code_offset() { return code_offset_; }
+  bool is_constructor() { return is_constructor_; }
+
+  void Print();
+
+ private:
+  Handle<Object> receiver_;
+  Handle<JSFunction> function_;
+  Handle<AbstractCode> abstract_code_;
+  int code_offset_;
+  bool is_constructor_;
+};
+
+class StandardFrame : public StackFrame {
  public:
   // Testers.
   bool is_standard() const override { return true; }
@@ -701,36 +724,19 @@
   friend class SafeStackFrameIterator;
 };
 
-
-class FrameSummary BASE_EMBEDDED {
- public:
-  FrameSummary(Object* receiver, JSFunction* function,
-               AbstractCode* abstract_code, int code_offset,
-               bool is_constructor);
-
-  Handle<Object> receiver() { return receiver_; }
-  Handle<JSFunction> function() { return function_; }
-  Handle<AbstractCode> abstract_code() { return abstract_code_; }
-  int code_offset() { return code_offset_; }
-  bool is_constructor() { return is_constructor_; }
-
-  void Print();
-
- private:
-  Handle<Object> receiver_;
-  Handle<JSFunction> function_;
-  Handle<AbstractCode> abstract_code_;
-  int code_offset_;
-  bool is_constructor_;
-};
-
 class JavaScriptFrame : public StandardFrame {
  public:
   Type type() const override { return JAVA_SCRIPT; }
 
+  // Build a list with summaries for this frame including all inlined frames.
+  virtual void Summarize(
+      List<FrameSummary>* frames,
+      FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
+
   // Accessors.
-  inline JSFunction* function() const;
-  inline Object* receiver() const;
+  virtual JSFunction* function() const;
+  virtual Object* receiver() const;
+
   inline void set_receiver(Object* value);
 
   // Access the parameters.
@@ -747,7 +753,6 @@
 
   // Generator support to preserve operand stack.
   void SaveOperandStack(FixedArray* store) const;
-  void RestoreOperandStack(FixedArray* store);
 
   // Debugger access.
   void SetParameterValue(int index, Object* value) const;
@@ -778,9 +783,6 @@
   // Return a list with JSFunctions of this frame.
   virtual void GetFunctions(List<JSFunction*>* functions) const;
 
-  // Build a list with summaries for this frame including all inlined frames.
-  virtual void Summarize(List<FrameSummary>* frames);
-
   // Lookup exception handler for current {pc}, returns -1 if none found. Also
   // returns data associated with the handler site specific to the frame type:
   //  - JavaScriptFrame : Data is the stack depth at entry of the try-block.
@@ -857,7 +859,9 @@
   // is the top-most activation)
   void GetFunctions(List<JSFunction*>* functions) const override;
 
-  void Summarize(List<FrameSummary>* frames) override;
+  void Summarize(
+      List<FrameSummary>* frames,
+      FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
 
   // Lookup exception handler for current {pc}, returns -1 if none found.
   int LookupExceptionHandlerInTable(
@@ -893,17 +897,20 @@
   void PatchBytecodeOffset(int new_offset);
 
   // Returns the frame's current bytecode array.
-  Object* GetBytecodeArray() const;
+  BytecodeArray* GetBytecodeArray() const;
 
   // Updates the frame's BytecodeArray with |bytecode_array|. Used by the
   // debugger to swap execution onto a BytecodeArray patched with breakpoints.
-  void PatchBytecodeArray(Object* bytecode_array);
+  void PatchBytecodeArray(BytecodeArray* bytecode_array);
 
   // Access to the interpreter register file for this frame.
-  Object* GetInterpreterRegister(int register_index) const;
+  Object* ReadInterpreterRegister(int register_index) const;
+  void WriteInterpreterRegister(int register_index, Object* value);
 
   // Build a list with summaries for this frame including all inlined frames.
-  void Summarize(List<FrameSummary>* frames) override;
+  void Summarize(
+      List<FrameSummary>* frames,
+      FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
 
  protected:
   inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
@@ -961,6 +968,11 @@
   // Determine the code for the frame.
   Code* unchecked_code() const override;
 
+  Object* wasm_obj();
+  uint32_t function_index();
+
+  Object* function_name();
+
   static WasmFrame* cast(StackFrame* frame) {
     DCHECK(frame->is_wasm());
     return static_cast<WasmFrame*>(frame);
@@ -1143,17 +1155,25 @@
   StackFrameIterator iterator_;
 };
 
-// NOTE: The stack trace frame iterator is an iterator that only
-// traverse proper JavaScript frames; that is JavaScript frames that
-// have proper JavaScript functions. This excludes the problematic
-// functions in runtime.js.
-class StackTraceFrameIterator: public JavaScriptFrameIterator {
+// NOTE: The stack trace frame iterator is an iterator that only traverses
+// proper JavaScript frames (those with proper JavaScript functions) and WASM
+// frames. This excludes the problematic functions in runtime.js.
+class StackTraceFrameIterator BASE_EMBEDDED {
  public:
   explicit StackTraceFrameIterator(Isolate* isolate);
+  bool done() const { return iterator_.done(); }
   void Advance();
 
+  inline StandardFrame* frame() const;
+
+  inline bool is_javascript() const;
+  inline bool is_wasm() const;
+  inline JavaScriptFrame* javascript_frame() const;
+  inline WasmFrame* wasm_frame() const;
+
  private:
-  bool IsValidFrame();
+  StackFrameIterator iterator_;
+  bool IsValidFrame(StackFrame* frame) const;
 };
 
 
diff --git a/src/full-codegen/arm/full-codegen-arm.cc b/src/full-codegen/arm/full-codegen-arm.cc
index 81c5ff2..91253e3 100644
--- a/src/full-codegen/arm/full-codegen-arm.cc
+++ b/src/full-codegen/arm/full-codegen-arm.cc
@@ -176,7 +176,8 @@
       __ push(r1);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -232,7 +233,8 @@
   // Register holding this function and new target are both trashed in case we
   // bailout here. But since that can happen only when new target is not used
   // and we allocate a context, the value of |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -294,7 +296,8 @@
   }
 
   // Visit the declarations and body.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -307,7 +310,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
     __ cmp(sp, Operand(ip));
@@ -405,11 +409,11 @@
   EmitProfilingCounterReset();
 
   __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -468,6 +472,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -728,7 +735,7 @@
 
   Label skip;
   if (should_normalize) __ b(&skip);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r0, ip);
@@ -761,15 +768,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(),
-                    zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
@@ -788,7 +793,7 @@
         __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
         __ str(r0, ContextMemOperand(cp, variable->index()));
         // No write barrier since the_hole_value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+        PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -809,6 +814,7 @@
       __ Push(r2, r0);
       __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -854,7 +860,7 @@
                                 kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET,
                                 OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -866,6 +872,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -897,7 +904,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -946,7 +953,7 @@
 
     Label skip;
     __ b(&skip);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r0, ip);
     __ b(ne, &next_test);
@@ -975,12 +982,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -1013,15 +1020,13 @@
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ push(r0);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so will always take the slow path.
   Label call_runtime;
   __ CheckEnumCache(&call_runtime);
 
@@ -1035,7 +1040,7 @@
   __ bind(&call_runtime);
   __ push(r0);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
 
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
@@ -1076,7 +1081,7 @@
   __ Push(r1, r0);  // Smi and array
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
   __ Push(r1);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ Push(r0);  // Initial index.
 
@@ -1118,7 +1123,7 @@
   __ push(r1);  // Enumerable.
   __ push(r3);  // Current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ mov(r3, Operand(r0));
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   __ cmp(r0, ip);
@@ -1131,11 +1136,11 @@
   // Perform the assignment as if via '='.
   { EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1154,7 +1159,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1284,17 +1289,12 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-      if (local->mode() == CONST_LEGACY) {
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-      } else {  // LET || CONST
-        __ b(ne, done);
-        __ mov(r0, Operand(var->name()));
-        __ push(r0);
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ b(ne, done);
+      __ mov(r0, Operand(var->name()));
+      __ push(r0);
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ jmp(done);
   }
@@ -1318,7 +1318,7 @@
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1351,10 +1351,6 @@
           __ push(r0);
           __ CallRuntime(Runtime::kThrowReferenceError);
           __ bind(&done);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
         }
         context()->Plug(r0);
         break;
@@ -1427,8 +1423,9 @@
   } else {
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in r0.
@@ -1464,7 +1461,7 @@
             __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1498,7 +1495,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
 
       case ObjectLiteral::Property::GETTER:
@@ -1556,7 +1553,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1629,7 +1626,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1659,7 +1656,8 @@
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1679,7 +1677,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1694,7 +1693,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1763,23 +1761,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1799,7 +1801,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1811,7 +1813,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(r0);
       break;
     case NAMED_PROPERTY:
@@ -1840,21 +1842,26 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ jmp(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, r0 holds the generator object.
   __ RecordGeneratorContinuation();
-  __ pop(r1);
-  __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
-  __ b(ne, &resume);
-  __ push(result_register());
+  __ ldr(r1, FieldMemOperand(r0, JSGeneratorObject::kResumeModeOffset));
+  __ ldr(r0, FieldMemOperand(r0, JSGeneratorObject::kInputOffset));
+  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
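+  // Thanks to the asserts above, one comparison against kReturn
+  // dispatches all three resume modes: lt is kNext (plain resume), eq
+  // falls through to build a done iterator result, and gt is kThrow
+  // (rethrow the input value).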
+  __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
+  __ b(lt, &resume);
+  __ Push(result_register());
+  __ b(gt, &exception);
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
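+  // Runtime::kThrow unwinds the stack and does not return here.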
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
@@ -1870,7 +1877,7 @@
   __ b(eq, &post_runtime);
   __ push(r0);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -1879,113 +1886,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in r0, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // r1 will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(r1);
-
-  // Store input value into generator object.
-  __ str(result_register(),
-         FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
-  __ mov(r2, result_register());
-  __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r2, r3,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
-  __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
-
-  // Load receiver and store as the first argument.
-  __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
-  __ push(r2);
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r3,
-         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
-  Label push_argument_holes, push_frame;
-  __ bind(&push_argument_holes);
-  __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
-  __ b(mi, &push_frame);
-  __ push(r2);
-  __ jmp(&push_argument_holes);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ bl(&resume_frame);
-  __ jmp(&done);
-  __ bind(&resume_frame);
-  // lr = return address.
-  // fp = caller's frame pointer.
-  // pp = caller's constant pool (if FLAG_enable_embedded_constant_pool),
-  // cp = callee's context,
-  // r4 = callee's JS function.
-  __ PushStandardFrame(r4);
-
-  // Load the operand stack size.
-  __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
-  __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset));
-  __ SmiUntag(r3);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ cmp(r3, Operand(0));
-    __ b(ne, &slow_resume);
-    __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
-
-    { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
-      if (FLAG_enable_embedded_constant_pool) {
-        // Load the new code object's constant pool pointer.
-        __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
-      }
-
-      __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
-      __ SmiUntag(r2);
-      __ add(r3, r3, r2);
-      __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-      __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
-      __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-      __ Jump(r3);
-    }
-    __ bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label push_operand_holes, call_resume;
-  __ bind(&push_operand_holes);
-  __ sub(r3, r3, Operand(1), SetCC);
-  __ b(mi, &call_resume);
-  __ push(r2);
-  __ b(&push_operand_holes);
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  DCHECK(!result_register().is(r1));
-  __ Push(r1, result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ stop("not-reached");
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
   OperandStackDepthIncrement(2);
   __ Push(reg1, reg2);
@@ -2009,7 +1909,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &allocate,
+              NO_ALLOCATION_FLAGS);
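+  // Assumption: TAG_OBJECT has been retired and Allocate now tags its
+  // result by default, making NO_ALLOCATION_FLAGS the direct replacement.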
   __ b(&done_allocate);
 
   __ bind(&allocate);
@@ -2336,8 +2237,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
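+    // With the CONST_LEGACY initializer branch below removed, Token::INIT
+    // stores for every const mode take this ordinary path.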
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2359,25 +2259,6 @@
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
 
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ push(r0);
-      __ mov(r0, Operand(var->name()));
-      __ Push(cp, r0);  // Context and name.
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, r1);
-      __ ldr(r2, location);
-      __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
-      __ b(ne, &skip);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2400,7 +2281,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r0);
 }
 
@@ -2445,44 +2326,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), r0);
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), r0);
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r0);
 }
 
@@ -2506,7 +2350,7 @@
   if (callee->IsVariableProxy()) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2519,7 +2363,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     __ ldr(ip, MemOperand(sp, 0));
     PushOperand(ip);
@@ -2558,6 +2403,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ str(r0, MemOperand(sp, kPointerSize));
@@ -2582,7 +2428,8 @@
   __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
   __ Move(LoadDescriptor::NameRegister(), r0);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   __ ldr(ip, MemOperand(sp, 0));
@@ -2618,6 +2465,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ str(r0, MemOperand(sp, kPointerSize));
@@ -2637,7 +2485,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
@@ -2658,13 +2506,12 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, r0);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   // r4: copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
@@ -2681,8 +2528,11 @@
   // r1: the start position of the scope the call resides in.
   __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
 
+  // r0: the source position of the eval call.
+  __ mov(r0, Operand(Smi::FromInt(expr->position())));
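+  // Taking the Call expression instead of a bare argument count is what
+  // makes this source position available to the runtime.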
+
   // Do the runtime call.
-  __ Push(r4, r3, r2, r1);
+  __ Push(r4, r3, r2, r1, r0);
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
 
@@ -2703,7 +2553,7 @@
     __ Push(callee->name());
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperands(r0, r1);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
     // function and receiver and have the slow path jump around this
@@ -2731,7 +2581,7 @@
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
   // In a call to eval, we first call
-  // RuntimeHidden_asResolvePossiblyDirectEval to resolve the function we need
+  // Runtime_ResolvePossiblyDirectEval to resolve the function we need
   // to call.  Then we call the resolved function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2747,12 +2597,12 @@
   // resolve eval.
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ push(r1);
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   // Record source position for debugger.
   SetCallPosition(expr);
@@ -2763,8 +2613,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, r0);
 }
 
@@ -2803,9 +2652,8 @@
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(r0);
 }
 
@@ -2848,9 +2696,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(r0);
 }
 
@@ -3239,7 +3085,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to r1.
   int const argc = args->length() - 2;
   __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3247,8 +3093,7 @@
   __ mov(r0, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, r0);
 }
@@ -3298,12 +3143,6 @@
   context()->Plug(r0);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r0);
-  context()->Plug(r0);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3323,7 +3162,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
   __ pop(r3);
   __ pop(r2);
@@ -3365,9 +3205,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3451,12 +3289,14 @@
                         &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(r0, Heap::kTrueValueRootIndex);
         if (context()->IsStackValue()) __ push(r0);
         __ jmp(&done);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(r0, Heap::kFalseValueRootIndex);
         if (context()->IsStackValue()) __ push(r0);
         __ bind(&done);
@@ -3558,9 +3398,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3609,7 +3449,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3656,7 +3496,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(r0);
         }
         // For all contexts except EffectContext we have the result on
@@ -3667,7 +3508,8 @@
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(r0);
       }
       break;
@@ -3677,7 +3519,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3716,7 +3558,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3813,7 +3655,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3833,7 +3674,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
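+      // The expression position is now recorded only after both operands
+      // have been evaluated, immediately before the operation itself.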
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3841,6 +3683,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       PopOperand(r1);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
@@ -3852,6 +3695,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cond = CompareIC::ComputeCondition(op);
       PopOperand(r1);
 
diff --git a/src/full-codegen/arm64/full-codegen-arm64.cc b/src/full-codegen/arm64/full-codegen-arm64.cc
index aa67117..61cb141 100644
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -179,7 +179,8 @@
       __ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
       __ Push(x1, x10);
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
+      // The new target value is not used, so clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -235,7 +236,8 @@
   // Registers holding this function and new target are both trashed in case
   // we bail out here. But since that can happen only when new target is not
   // used and we allocate a context, the value of |function_in_register| is
   // correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -297,7 +299,8 @@
   }
 
   // Visit the declarations and body.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -310,7 +313,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     DCHECK(jssp.Is(__ StackPointer()));
     __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
@@ -393,11 +397,11 @@
   EmitProfilingCounterReset();
 
   __ Bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -460,6 +464,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
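+  // Stub and runtime calls may clobber cp; reload it from the standard
+  // frame slot.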
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -725,7 +732,7 @@
   if (should_normalize) {
     __ B(&skip);
   }
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ CompareRoot(x0, Heap::kTrueValueRootIndex);
     Split(eq, if_true, if_false, NULL);
@@ -757,16 +764,14 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
 
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(),
-                    zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
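+      // With CONST_LEGACY gone, unallocated declarations never need hole
+      // initialization, so undefined is stored unconditionally.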
       break;
 
     case VariableLocation::PARAMETER:
@@ -785,7 +790,7 @@
         __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
         __ Str(x10, ContextMemOperand(cp, variable->index()));
         // No write barrier since the_hole_value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+        PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -807,6 +812,7 @@
       }
       __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -852,7 +858,7 @@
                                 kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET,
                                 OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -864,6 +870,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -900,7 +907,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -947,7 +954,7 @@
 
     Label skip;
     __ B(&skip);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
     __ Drop(1);
     __ B(clause->body_target());
@@ -973,12 +980,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ Bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ Bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -1011,15 +1018,13 @@
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
   __ Bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ Push(x0);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so will always take the slow path.
   Label call_runtime;
   __ CheckEnumCache(x0, x15, x10, x11, x12, x13, &call_runtime);
 
@@ -1033,7 +1038,7 @@
   __ Bind(&call_runtime);
   __ Push(x0);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
 
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
@@ -1069,7 +1074,7 @@
   __ Mov(x1, Smi::FromInt(1));  // Smi(1) indicates slow check.
   __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
   __ Push(x1, x0, x2);  // Smi and array, fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ Push(xzr);  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1109,7 +1114,7 @@
   // just skip it.
   __ Push(x1, x3);
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ Mov(x3, x0);
   __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex,
                 loop_statement.continue_label());
@@ -1121,11 +1126,11 @@
   // Perform the assignment as if via '='.
   { EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1145,7 +1150,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ Bind(&exit);
   decrement_loop_depth();
 }
@@ -1270,16 +1275,11 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
-      if (local->mode() == CONST_LEGACY) {
-        __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
-      } else {  // LET || CONST
-        __ Mov(x0, Operand(var->name()));
-        __ Push(x0);
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ Mov(x0, Operand(var->name()));
+      __ Push(x0);
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ B(done);
   }
@@ -1303,7 +1303,7 @@
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1336,11 +1336,6 @@
           __ Push(x0);
           __ CallRuntime(Runtime::kThrowReferenceError);
           __ Bind(&done);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
-          __ Bind(&done);
         }
         context()->Plug(x0);
         break;
@@ -1414,8 +1409,9 @@
   } else {
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in x0.
@@ -1451,7 +1447,7 @@
             __ Peek(StoreDescriptor::ReceiverRegister(), 0);
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1484,7 +1480,7 @@
         VisitForStackValue(value);
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1541,7 +1537,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1612,7 +1608,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1642,7 +1638,8 @@
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1662,7 +1659,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1677,7 +1675,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1743,23 +1740,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1779,7 +1780,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1791,7 +1792,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(x0);
       break;
     case NAMED_PROPERTY:
@@ -2127,8 +2128,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2149,23 +2149,6 @@
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
 
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ Mov(x1, Operand(var->name()));
-      __ Push(x0, cp, x1);
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackLocal() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, x1);
-      __ Ldr(x10, location);
-      __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ Bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2189,7 +2172,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(x0);
 }
 
@@ -2237,43 +2220,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), x0);
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), x0);
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(x0);
 }
 
@@ -2297,7 +2244,7 @@
   if (callee->IsVariableProxy()) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2314,7 +2261,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ Peek(LoadDescriptor::ReceiverRegister(), 0);
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     PopOperand(x10);
     PushOperands(x0, x10);
@@ -2354,6 +2302,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ Poke(x0, kPointerSize);
@@ -2379,7 +2328,8 @@
   __ Peek(LoadDescriptor::ReceiverRegister(), 0);
   __ Move(LoadDescriptor::NameRegister(), x0);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   PopOperand(x10);
@@ -2415,6 +2365,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ Poke(x0, kPointerSize);
@@ -2435,7 +2386,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
@@ -2456,13 +2407,12 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, x0);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
   // Prepare to push a copy of the first argument or undefined if it doesn't
   // exist.
@@ -2478,9 +2428,11 @@
   __ Mov(x11, Smi::FromInt(language_mode()));
   // Prepare to push the start position of the scope the call resides in.
   __ Mov(x12, Smi::FromInt(scope()->start_position()));
+  // Prepare to push the source position of the eval call.
+  __ Mov(x13, Smi::FromInt(expr->position()));
 
   // Push.
-  __ Push(x9, x10, x11, x12);
+  __ Push(x9, x10, x11, x12, x13);
 
   // Do the runtime call.
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
@@ -2503,7 +2455,7 @@
     __ Push(callee->name());
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperands(x0, x1);  // Receiver, function.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
     // function and receiver and have the slow path jump around this
@@ -2530,7 +2482,7 @@
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitPossiblyEvalCall");
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
   // to resolve the function we need to call.  Then we call the resolved
   // function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2547,12 +2499,12 @@
   // resolve eval.
   __ Peek(x10, (arg_count + 1) * kPointerSize);
   __ Push(x10);
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ Poke(x0, (arg_count + 1) * kPointerSize);
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   // Record source position for debugger.
   SetCallPosition(expr);
@@ -2565,8 +2517,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, x0);
 }
 
@@ -2605,9 +2556,8 @@
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(x0);
 }
 
@@ -2651,9 +2601,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(x0);
 }
 
@@ -3042,7 +2990,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to x1.
   int const argc = args->length() - 2;
   __ Peek(x1, (argc + 1) * kXRegSize);
@@ -3050,8 +2998,7 @@
   __ Mov(x0, argc);
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, x0);
 }
@@ -3101,12 +3048,6 @@
   context()->Plug(x0);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, x0);
-  context()->Plug(x0);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3127,7 +3068,8 @@
   Label runtime, done;
 
   Register result = x0;
-  __ Allocate(JSIteratorResult::kSize, result, x10, x11, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, result, x10, x11, &runtime,
+              NO_ALLOCATION_FLAGS);
   Register map_reg = x1;
   Register result_value = x2;
   Register boolean_done = x3;
@@ -3179,9 +3121,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3263,12 +3203,14 @@
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
 
         __ Bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
         __ B(&done);
 
         __ Bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
         __ B(&done);
 
@@ -3368,9 +3310,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3419,7 +3361,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3468,7 +3410,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(x0);
         }
         // For all contexts except EffectContext we have the result on
@@ -3479,7 +3422,8 @@
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(x0);
       }
       break;
@@ -3489,7 +3433,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3528,7 +3472,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3631,7 +3575,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // Try to generate an optimized comparison with a literal value.
   // TODO(jbramley): This only checks common values like NaN or undefined.
@@ -3654,7 +3597,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(x0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3662,6 +3606,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       PopOperand(x1);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
@@ -3673,6 +3618,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cond = CompareIC::ComputeCondition(op);
 
       // Pop the stack value.
@@ -3745,24 +3691,29 @@
   // and suchlike. The implementation changes a little on bleeding_edge so I
   // don't want to spend too much time on it now.
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ B(&suspend);
   // TODO(jbramley): This label is bound here because the following code
   // looks at its pos(). Is it possible to do something more efficient here,
   // perhaps using Adr?
   __ Bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, x0 holds the generator object.
   __ RecordGeneratorContinuation();
-  __ Pop(x1);
-  __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
-  __ B(ne, &resume);
+  __ Ldr(x1, FieldMemOperand(x0, JSGeneratorObject::kResumeModeOffset));
+  __ Ldr(x0, FieldMemOperand(x0, JSGeneratorObject::kInputOffset));
+  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+  __ Cmp(x1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
+  __ B(lt, &resume);
   __ Push(result_register());
+  __ B(gt, &exception);
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ Bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
+
   __ Bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
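
The continuation above dispatches on the resume mode using the Smi ordering
pinned down by the two STATIC_ASSERTs: kNext sorts below kReturn, kThrow
above it. A minimal runnable C++ model of that three-way branch
(hypothetical helper names, not V8 code):

    #include <cstdio>

    enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };  // asserted order

    // Hypothetical stand-ins for the three continuation targets.
    void Resume(int v)         { std::printf("resume with %d\n", v); }
    void ThrowValue(int v)     { std::printf("throw %d\n", v); }
    void IteratorResult(int v) { std::printf("{value: %d, done: true}\n", v); }

    // Models the lt/gt comparisons against kReturn emitted above.
    void DispatchResume(ResumeMode mode, int input) {
      if (mode < kReturn)      Resume(input);          // __ B(lt, &resume)
      else if (mode > kReturn) ThrowValue(input);      // __ B(gt, &exception)
      else                     IteratorResult(input);  // fall through: return
    }
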
@@ -3778,7 +3729,7 @@
   __ B(eq, &post_runtime);
   __ Push(x0);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ Bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -3787,110 +3738,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
-  Register generator_object = x1;
-  Register the_hole = x2;
-  Register operand_stack_size = w3;
-  Register function = x4;
-
-  // The value stays in x0, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed. x1
-  // will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(generator_object);
-
-  // Store input value into generator object.
-  __ Str(result_register(),
-         FieldMemOperand(x1, JSGeneratorObject::kInputOffset));
-  __ Mov(x2, result_register());
-  __ RecordWriteField(x1, JSGeneratorObject::kInputOffset, x2, x3,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ Ldr(cp, FieldMemOperand(generator_object,
-                             JSGeneratorObject::kContextOffset));
-  __ Ldr(function, FieldMemOperand(generator_object,
-                                   JSGeneratorObject::kFunctionOffset));
-
-  // Load receiver and store as the first argument.
-  __ Ldr(x10, FieldMemOperand(generator_object,
-                              JSGeneratorObject::kReceiverOffset));
-  __ Push(x10);
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-
-  // The number of arguments is stored as an int32_t, and -1 is a marker
-  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
-  // extension to correctly handle it. However, in this case, we operate on
-  // 32-bit W registers, so extension isn't required.
-  __ Ldr(w10, FieldMemOperand(x10,
-                              SharedFunctionInfo::kFormalParameterCountOffset));
-  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
-  __ PushMultipleTimes(the_hole, w10);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ Bl(&resume_frame);
-  __ B(&done);
-
-  __ Bind(&resume_frame);
-  __ Push(lr,           // Return address.
-          fp,           // Caller's frame pointer.
-          cp,           // Callee's context.
-          function);    // Callee's JS Function.
-  __ Add(fp, __ StackPointer(), kPointerSize * 2);
-
-  // Load and untag the operand stack size.
-  __ Ldr(x10, FieldMemOperand(generator_object,
-                              JSGeneratorObject::kOperandStackOffset));
-  __ Ldr(operand_stack_size,
-         UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ Cbnz(operand_stack_size, &slow_resume);
-    __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-    __ Ldrsw(x11,
-             UntagSmiFieldMemOperand(generator_object,
-                                     JSGeneratorObject::kContinuationOffset));
-    __ Add(x10, x10, x11);
-    __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-    __ Str(x12, FieldMemOperand(generator_object,
-                                JSGeneratorObject::kContinuationOffset));
-    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-    __ Br(x10);
-
-    __ Bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  __ PushMultipleTimes(the_hole, operand_stack_size);
-
-  __ Mov(x10, Smi::FromInt(resume_mode));
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  __ Push(generator_object, result_register(), x10);
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ Unreachable();
-
-  __ Bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
   OperandStackDepthIncrement(2);
   __ Push(reg1, reg2);
@@ -3923,7 +3770,8 @@
   // Allocate and populate an object with this form: { value: VAL, done: DONE }
 
   Register result = x0;
-  __ Allocate(JSIteratorResult::kSize, result, x10, x11, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, result, x10, x11, &allocate,
+              NO_ALLOCATION_FLAGS);
   __ B(&done_allocate);
 
   __ Bind(&allocate);
diff --git a/src/full-codegen/full-codegen.cc b/src/full-codegen/full-codegen.cc
index af5dd41..2d7ad32 100644
--- a/src/full-codegen/full-codegen.cc
+++ b/src/full-codegen/full-codegen.cc
@@ -28,6 +28,8 @@
 bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
 
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::CompileFullCode);
   TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
   TRACE_EVENT0("v8", "V8.CompileFullCode");
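
RuntimeCallTimerScope is an RAII accounting scope: construction starts
attributing time to a per-isolate counter, destruction stops it. A
simplified self-contained sketch of the pattern (an assumption; the real
class also manages a stack of nested timers):

    #include <chrono>

    struct Counter { std::chrono::nanoseconds total{0}; };

    // RAII scope: charges elapsed wall time to a counter on destruction.
    class ScopedTimer {
     public:
      explicit ScopedTimer(Counter* counter)
          : counter_(counter), start_(std::chrono::steady_clock::now()) {}
      ~ScopedTimer() {
        counter_->total += std::chrono::duration_cast<std::chrono::nanoseconds>(
            std::chrono::steady_clock::now() - start_);
      }
     private:
      Counter* counter_;
      std::chrono::steady_clock::time_point start_;
    };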
 
@@ -144,13 +146,8 @@
 
 bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
     ObjectLiteral* expr) const {
-  // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
-  // support copy-on-write (COW) elements for now.
-  // TODO(mvstanton): make object literals support COW elements.
-  return masm()->serializer_enabled() || !expr->fast_elements() ||
-         !expr->has_shallow_properties() ||
-         expr->properties_count() >
-             FastCloneShallowObjectStub::kMaximumClonedProperties;
+  return masm()->serializer_enabled() ||
+         !FastCloneShallowObjectStub::IsSupported(expr);
 }
 
 
@@ -167,8 +164,8 @@
   masm_->set_predictable_code_size(true);
 }
 
-
-void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
+void FullCodeGenerator::PrepareForBailout(Expression* node,
+                                          BailoutState state) {
   PrepareForBailoutForId(node->id(), state);
 }
 
@@ -191,9 +188,9 @@
   // if the function was inlined, i.e., this is the return address in the
   // inlined function's frame.
   //
-  // The state is ignored.  We defensively set it to TOS_REG, which is the
-  // real state of the unoptimized code at the return site.
-  PrepareForBailoutForId(call->ReturnId(), TOS_REG);
+  // The bailout state is ignored.  We defensively set it to TOS_REGISTER, which
+  // is the real state of the unoptimized code at the return site.
+  PrepareForBailoutForId(call->ReturnId(), BailoutState::TOS_REGISTER);
 #ifdef DEBUG
   // In debug builds, mark the return so we can verify that this function
   // was called.
@@ -202,13 +199,13 @@
 #endif
 }
 
-
-void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(BailoutId id,
+                                               BailoutState state) {
   // There's no need to prepare this code for bailouts from already optimized
   // code or code that can't be optimized.
   if (!info_->HasDeoptimizationSupport()) return;
   unsigned pc_and_state =
-      StateField::encode(state) | PcField::encode(masm_->pc_offset());
+      BailoutStateField::encode(state) | PcField::encode(masm_->pc_offset());
   DCHECK(Smi::IsValid(pc_and_state));
 #ifdef DEBUG
   for (int i = 0; i < bailout_entries_.length(); ++i) {
@@ -447,10 +444,7 @@
 
 
 int FullCodeGenerator::DeclareGlobalsFlags() {
-  DCHECK(DeclareGlobalsLanguageMode::is_valid(language_mode()));
-  return DeclareGlobalsEvalFlag::encode(is_eval()) |
-         DeclareGlobalsNativeFlag::encode(is_native()) |
-         DeclareGlobalsLanguageMode::encode(language_mode());
+  return info_->GetDeclareGlobalsFlags();
 }
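
DeclareGlobalsFlags() now defers to CompilationInfo. Presumably
GetDeclareGlobalsFlags() encodes the same three bitfields the deleted body
did, along these lines (an assumed sketch):

    int CompilationInfo::GetDeclareGlobalsFlags() {
      DCHECK(DeclareGlobalsLanguageMode::is_valid(language_mode()));
      return DeclareGlobalsEvalFlag::encode(is_eval()) |
             DeclareGlobalsNativeFlag::encode(is_native()) |
             DeclareGlobalsLanguageMode::encode(language_mode());
    }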
 
 void FullCodeGenerator::PushOperand(Handle<Object> handle) {
@@ -609,6 +603,13 @@
   EmitIntrinsicAsStubCall(expr, CodeFactory::RegExpConstructResult(isolate()));
 }
 
+void FullCodeGenerator::EmitHasProperty() {
+  Callable callable = CodeFactory::HasProperty(isolate());
+  PopOperand(callable.descriptor().GetRegisterParameter(1));
+  PopOperand(callable.descriptor().GetRegisterParameter(0));
+  __ Call(callable.code(), RelocInfo::CODE_TARGET);
+  RestoreContext();
+}
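
The pop order in EmitHasProperty() above matters: operands are pushed left
to right, so descriptor parameter 1 sits on top of the operand stack and
must be popped before parameter 0. A tiny stand-alone model of that stack
discipline:

    #include <cassert>
    #include <stack>

    int main() {
      std::stack<int> operands;
      operands.push(10);  // parameter 0, pushed first
      operands.push(20);  // parameter 1, pushed second, now on top
      int p1 = operands.top(); operands.pop();  // PopOperand(parameter 1)
      int p0 = operands.top(); operands.pop();  // PopOperand(parameter 0)
      assert(p0 == 10 && p1 == 20);
      return 0;
    }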
 
 bool RecordStatementPosition(MacroAssembler* masm, int pos) {
   if (pos == RelocInfo::kNoPosition) return false;
@@ -652,14 +653,9 @@
   }
 }
 
-
-void FullCodeGenerator::SetExpressionPosition(
-    Expression* expr, FullCodeGenerator::InsertBreak insert_break) {
+void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
-  bool recorded = RecordPosition(masm_, expr->position());
-  if (recorded && insert_break == INSERT_BREAK && info_->is_debug()) {
-    DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
-  }
+  RecordPosition(masm_, expr->position());
 }
 
 
@@ -688,32 +684,15 @@
 void FullCodeGenerator::VisitSuperPropertyReference(
     SuperPropertyReference* super) {
   __ CallRuntime(Runtime::kThrowUnsupportedSuperError);
+  // Even though this expression doesn't produce a value, we need to simulate
+  // plugging of the value context to ensure stack depth tracking is in sync.
+  if (context()->IsStackValue()) OperandStackDepthIncrement(1);
 }
 
 
 void FullCodeGenerator::VisitSuperCallReference(SuperCallReference* super) {
-  __ CallRuntime(Runtime::kThrowUnsupportedSuperError);
-}
-
-
-void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::NEXT);
-}
-
-
-void FullCodeGenerator::EmitGeneratorReturn(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::RETURN);
-}
-
-
-void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::THROW);
+  // Handled by VisitCall
+  UNREACHABLE();
 }
 
 
@@ -773,7 +752,7 @@
     } else {
       VisitForControl(left, test->true_label(), &eval_right, &eval_right);
     }
-    PrepareForBailoutForId(right_id, NO_REGISTERS);
+    PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
     __ bind(&eval_right);
 
   } else if (context()->IsAccumulatorValue()) {
@@ -792,7 +771,7 @@
     __ jmp(&done);
     __ bind(&discard);
     __ Drop(1);
-    PrepareForBailoutForId(right_id, NO_REGISTERS);
+    PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
 
   } else if (context()->IsStackValue()) {
     VisitForAccumulatorValue(left);
@@ -807,7 +786,7 @@
     }
     __ bind(&discard);
     __ Drop(1);
-    PrepareForBailoutForId(right_id, NO_REGISTERS);
+    PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
 
   } else {
     DCHECK(context()->IsEffect());
@@ -817,7 +796,7 @@
     } else {
       VisitForControl(left, &done, &eval_right, &eval_right);
     }
-    PrepareForBailoutForId(right_id, NO_REGISTERS);
+    PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
     __ bind(&eval_right);
   }
 
@@ -843,6 +822,41 @@
   }
 }
 
+void FullCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  SetExpressionPosition(expr);
+
+  Expression* key = expr->key();
+
+  if (key->IsPropertyName()) {
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+      EmitNamedPropertyLoad(expr);
+    } else {
+      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+      VisitForStackValue(
+          expr->obj()->AsSuperPropertyReference()->home_object());
+      EmitNamedSuperPropertyLoad(expr);
+    }
+  } else {
+    if (!expr->IsSuperAccess()) {
+      VisitForStackValue(expr->obj());
+      VisitForAccumulatorValue(expr->key());
+      __ Move(LoadDescriptor::NameRegister(), result_register());
+      PopOperand(LoadDescriptor::ReceiverRegister());
+      EmitKeyedPropertyLoad(expr);
+    } else {
+      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+      VisitForStackValue(
+          expr->obj()->AsSuperPropertyReference()->home_object());
+      VisitForStackValue(expr->key());
+      EmitKeyedSuperPropertyLoad(expr);
+    }
+  }
+  PrepareForBailoutForId(expr->LoadId(), BailoutState::TOS_REGISTER);
+  context()->Plug(result_register());
+}
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
@@ -852,7 +866,7 @@
   if (proxy != NULL && (proxy->var()->IsUnallocatedOrGlobalSlot() ||
                         proxy->var()->IsLookupSlot())) {
     EmitVariableLoad(proxy, INSIDE_TYPEOF);
-    PrepareForBailout(proxy, TOS_REG);
+    PrepareForBailout(proxy, BailoutState::TOS_REGISTER);
   } else {
     // This expression cannot throw a reference error at the top level.
     VisitInDuplicateContext(expr);
@@ -875,7 +889,6 @@
 
 void FullCodeGenerator::VisitDoExpression(DoExpression* expr) {
   Comment cmnt(masm_, "[ Do Expression");
-  NestedStatement nested_block(this);
   SetExpressionPosition(expr);
   VisitBlock(expr->block());
   EmitVariableLoad(expr->result());
@@ -901,24 +914,24 @@
 
   if (stmt->HasElseStatement()) {
     VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
-    PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->ThenId(), BailoutState::NO_REGISTERS);
     __ bind(&then_part);
     Visit(stmt->then_statement());
     __ jmp(&done);
 
-    PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->ElseId(), BailoutState::NO_REGISTERS);
     __ bind(&else_part);
     Visit(stmt->else_statement());
   } else {
     VisitForControl(stmt->condition(), &then_part, &done, &then_part);
-    PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->ThenId(), BailoutState::NO_REGISTERS);
     __ bind(&then_part);
     Visit(stmt->then_statement());
 
-    PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->ElseId(), BailoutState::NO_REGISTERS);
   }
   __ bind(&done);
-  PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->IfId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitContinue(Statement* target) {
@@ -1082,7 +1095,7 @@
                                         BailoutId bailout_id) {
   VisitForStackValue(property->key());
   CallRuntimeWithOperands(Runtime::kToName);
-  PrepareForBailoutForId(bailout_id, NO_REGISTERS);
+  PrepareForBailoutForId(bailout_id, BailoutState::NO_REGISTERS);
   PushOperand(result_register());
 }
 
@@ -1108,12 +1121,12 @@
   Callable callable = CodeFactory::ToObject(isolate());
   __ Move(callable.descriptor().GetRegisterParameter(0), result_register());
   __ Call(callable.code(), RelocInfo::CODE_TARGET);
-  PrepareForBailoutForId(stmt->ToObjectId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::NO_REGISTERS);
   PushOperand(result_register());
   PushFunctionArgumentForContextAllocation();
   CallRuntimeWithOperands(Runtime::kPushWithContext);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   Scope* saved_scope = scope();
   scope_ = stmt->scope();
@@ -1145,7 +1158,7 @@
   // Record the position of the do while condition and make sure it is
   // possible to break on the condition.
   __ bind(loop_statement.continue_label());
-  PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ContinueId(), BailoutState::NO_REGISTERS);
 
   // Here is the actual 'while' keyword.
   SetExpressionAsStatementPosition(stmt->cond());
@@ -1155,12 +1168,12 @@
                   &book_keeping);
 
   // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BackEdgeId(), BailoutState::NO_REGISTERS);
   __ bind(&book_keeping);
   EmitBackEdgeBookkeeping(stmt, &body);
   __ jmp(&body);
 
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(loop_statement.break_label());
   decrement_loop_depth();
 }
@@ -1181,7 +1194,7 @@
                   loop_statement.break_label(),
                   &body);
 
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   __ bind(&body);
   Visit(stmt->body());
 
@@ -1191,7 +1204,7 @@
   EmitBackEdgeBookkeeping(stmt, &loop);
   __ jmp(&loop);
 
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(loop_statement.break_label());
   decrement_loop_depth();
 }
@@ -1214,11 +1227,11 @@
   // Emit the test at the bottom of the loop (even if empty).
   __ jmp(&test);
 
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   __ bind(&body);
   Visit(stmt->body());
 
-  PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ContinueId(), BailoutState::NO_REGISTERS);
   __ bind(loop_statement.continue_label());
   if (stmt->next() != NULL) {
     SetStatementPosition(stmt->next());
@@ -1239,7 +1252,7 @@
     __ jmp(&body);
   }
 
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(loop_statement.break_label());
   decrement_loop_depth();
 }
@@ -1252,6 +1265,7 @@
   increment_loop_depth();
 
   // var iterator = iterable[Symbol.iterator]();
+  SetExpressionAsStatementPosition(stmt->assign_iterator());
   VisitForEffect(stmt->assign_iterator());
 
   // Loop entry.
@@ -1274,12 +1288,12 @@
   Visit(stmt->body());
 
   // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BackEdgeId(), BailoutState::NO_REGISTERS);
   EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
   __ jmp(loop_statement.continue_label());
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(loop_statement.break_label());
   decrement_loop_depth();
 }
@@ -1423,7 +1437,7 @@
   __ DebugBreak();
   // Ignore the return value.
 
-  PrepareForBailoutForId(stmt->DebugBreakId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->DebugBreakId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -1438,7 +1452,7 @@
   VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
 
   int original_stack_depth = operand_stack_depth_;
-  PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->ThenId(), BailoutState::NO_REGISTERS);
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression());
   if (context()->IsTest()) {
@@ -1453,7 +1467,7 @@
   }
 
   operand_stack_depth_ = original_stack_depth;
-  PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->ElseId(), BailoutState::NO_REGISTERS);
   __ bind(&false_case);
   SetExpressionPosition(expr->else_expression());
   VisitInDuplicateContext(expr->else_expression());
@@ -1504,7 +1518,7 @@
     PushOperand(Smi::FromInt(lit->end_position()));
 
     CallRuntimeWithOperands(Runtime::kDefineClass);
-    PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
+    PrepareForBailoutForId(lit->CreateLiteralId(), BailoutState::TOS_REGISTER);
     PushOperand(result_register());
 
     // Load the "prototype" from the constructor.
@@ -1513,13 +1527,14 @@
                 Heap::kprototype_stringRootIndex);
     __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
     CallLoadIC(NOT_INSIDE_TYPEOF);
-    PrepareForBailoutForId(lit->PrototypeId(), TOS_REG);
+    PrepareForBailoutForId(lit->PrototypeId(), BailoutState::TOS_REGISTER);
     PushOperand(result_register());
 
     EmitClassDefineProperties(lit);
+    DropOperands(1);
 
-    // Set both the prototype and constructor to have fast properties.
-    CallRuntimeWithOperands(Runtime::kFinalizeClassDefinition);
+    // Set the constructor to have fast properties.
+    CallRuntimeWithOperands(Runtime::kToFastProperties);
 
     if (lit->class_variable_proxy() != nullptr) {
       EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
@@ -1653,7 +1668,7 @@
       VisitForStackValue(args->at(i));
     }
 
-    PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
     EmitCallJSRuntimeFunction(expr);
     context()->DropAndPlug(1, result_register());
 
@@ -1675,7 +1690,7 @@
         }
 
         // Call the C runtime function.
-        PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
         OperandStackDepthDecrement(arg_count);
         context()->Plug(result_register());
@@ -1768,16 +1783,19 @@
   Expression* sub_expr;
   Handle<String> check;
   if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    SetExpressionPosition(expr);
     EmitLiteralCompareTypeof(expr, sub_expr, check);
     return true;
   }
 
   if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    SetExpressionPosition(expr);
     EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
     return true;
   }
 
   if (expr->IsLiteralCompareNull(&sub_expr)) {
+    SetExpressionPosition(expr);
     EmitLiteralCompareNil(expr, sub_expr, kNullValue);
     return true;
   }
@@ -1861,7 +1879,7 @@
   saved_scope_ = codegen_->scope();
 
   if (scope == NULL) {
-    codegen_->PrepareForBailoutForId(entry_id, NO_REGISTERS);
+    codegen_->PrepareForBailoutForId(entry_id, BailoutState::NO_REGISTERS);
     needs_block_context_ = false;
   } else {
     needs_block_context_ = scope->NeedsContext();
@@ -1878,12 +1896,13 @@
                                     codegen_->context_register());
       }
       CHECK_EQ(0, scope->num_stack_slots());
-      codegen_->PrepareForBailoutForId(entry_id, NO_REGISTERS);
+      codegen_->PrepareForBailoutForId(entry_id, BailoutState::NO_REGISTERS);
     }
     {
       Comment cmnt(masm(), "[ Declarations");
       codegen_->VisitDeclarations(scope->declarations());
-      codegen_->PrepareForBailoutForId(declarations_id, NO_REGISTERS);
+      codegen_->PrepareForBailoutForId(declarations_id,
+                                       BailoutState::NO_REGISTERS);
     }
   }
 }
@@ -1897,7 +1916,7 @@
     codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
                                 codegen_->context_register());
   }
-  codegen_->PrepareForBailoutForId(exit_id_, NO_REGISTERS);
+  codegen_->PrepareForBailoutForId(exit_id_, BailoutState::NO_REGISTERS);
   codegen_->scope_ = saved_scope_;
 }
 
@@ -1956,7 +1975,7 @@
   DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
   DCHECK(proxy->position() != RelocInfo::kNoPosition);
 
-  return var->mode() == CONST_LEGACY || var->scope()->is_nonlinear() ||
+  return var->scope()->is_nonlinear() ||
          var->initializer_position() >= proxy->position();
 }
 
diff --git a/src/full-codegen/full-codegen.h b/src/full-codegen/full-codegen.h
index 0c12937..0a004a8 100644
--- a/src/full-codegen/full-codegen.h
+++ b/src/full-codegen/full-codegen.h
@@ -14,6 +14,7 @@
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
+#include "src/deoptimizer.h"
 #include "src/globals.h"
 #include "src/objects.h"
 
@@ -28,11 +29,6 @@
 
 class FullCodeGenerator: public AstVisitor {
  public:
-  enum State {
-    NO_REGISTERS,
-    TOS_REG
-  };
-
   FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
       : masm_(masm),
         info_(info),
@@ -60,19 +56,10 @@
 
   static bool MakeCode(CompilationInfo* info);
 
-  // Encode state and pc-offset as a BitField<type, start, size>.
+  // Encode bailout state and pc-offset as a BitField<type, start, size>.
   // Only use 30 bits because we encode the result as a smi.
-  class StateField : public BitField<State, 0, 1> { };
-  class PcField    : public BitField<unsigned, 1, 30-1> { };
-
-  static const char* State2String(State state) {
-    switch (state) {
-      case NO_REGISTERS: return "NO_REGISTERS";
-      case TOS_REG: return "TOS_REG";
-    }
-    UNREACHABLE();
-    return NULL;
-  }
+  class BailoutStateField : public BitField<Deoptimizer::BailoutState, 0, 1> {};
+  class PcField : public BitField<unsigned, 1, 30 - 1> {};
 
   static const int kMaxBackEdgeWeight = 127;
 
@@ -106,6 +93,8 @@
   static Register result_register();
 
  private:
+  typedef Deoptimizer::BailoutState BailoutState;
+
   class Breakable;
   class Iteration;
   class TryFinally;
@@ -366,21 +355,21 @@
     if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
     EffectContext context(this);
     Visit(expr);
-    PrepareForBailout(expr, NO_REGISTERS);
+    PrepareForBailout(expr, BailoutState::NO_REGISTERS);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
     AccumulatorValueContext context(this);
     Visit(expr);
-    PrepareForBailout(expr, TOS_REG);
+    PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   }
 
   void VisitForStackValue(Expression* expr) {
     if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
     StackValueContext context(this);
     Visit(expr);
-    PrepareForBailout(expr, NO_REGISTERS);
+    PrepareForBailout(expr, BailoutState::NO_REGISTERS);
   }
 
   void VisitForControl(Expression* expr,
@@ -452,8 +441,8 @@
                              NilValue nil);
 
   // Bailout support.
-  void PrepareForBailout(Expression* node, State state);
-  void PrepareForBailoutForId(BailoutId id, State state);
+  void PrepareForBailout(Expression* node, Deoptimizer::BailoutState state);
+  void PrepareForBailoutForId(BailoutId id, Deoptimizer::BailoutState state);
 
   // Returns a smi for the index into the FixedArray that backs the feedback
   // vector
@@ -531,9 +520,6 @@
   F(HasCachedArrayIndex)                \
   F(GetCachedArrayIndex)                \
   F(GetSuperConstructor)                \
-  F(GeneratorNext)                      \
-  F(GeneratorReturn)                    \
-  F(GeneratorThrow)                     \
   F(DebugBreakInOptimizedCode)          \
   F(ClassOf)                            \
   F(StringCharCodeAt)                   \
@@ -548,7 +534,6 @@
   F(ToName)                             \
   F(ToObject)                           \
   F(DebugIsActive)                      \
-  F(GetOrdinaryHasInstance)             \
   F(CreateIterResultObject)
 
 #define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
@@ -557,10 +542,11 @@
 
   void EmitIntrinsicAsStubCall(CallRuntime* expr, const Callable& callable);
 
-  // Platform-specific code for resuming generators.
-  void EmitGeneratorResume(Expression *generator,
-                           Expression *value,
-                           JSGeneratorObject::ResumeMode resume_mode);
+  // Emits call to respective code stub.
+  void EmitHasProperty();
+
+  // Platform-specific code for restoring context from current JS frame.
+  void RestoreContext();
 
   // Platform-specific code for loading variables.
   void EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
@@ -577,7 +563,7 @@
   bool NeedsHoleCheckForLoad(VariableProxy* proxy);
 
   // Expects the arguments and the function already pushed.
-  void EmitResolvePossiblyDirectEval(int arg_count);
+  void EmitResolvePossiblyDirectEval(Call* expr);
 
   // Platform-specific support for allocating a new closure based on
   // the given function info.
@@ -687,8 +673,7 @@
   // otherwise.
   void SetStatementPosition(Statement* stmt,
                             InsertBreak insert_break = INSERT_BREAK);
-  void SetExpressionPosition(Expression* expr,
-                             InsertBreak insert_break = SKIP_BREAK);
+  void SetExpressionPosition(Expression* expr);
 
   // Consider an expression a statement. As such, we also insert a break.
   // This is used in loop headers where we want to break for each iteration.
@@ -729,8 +714,6 @@
   Isolate* isolate() const { return isolate_; }
   Zone* zone() const { return zone_; }
   Handle<Script> script() { return info_->script(); }
-  bool is_eval() { return info_->is_eval(); }
-  bool is_native() { return info_->is_native(); }
   LanguageMode language_mode() { return scope()->language_mode(); }
   bool has_simple_parameters() { return info_->has_simple_parameters(); }
   FunctionLiteral* literal() const { return info_->literal(); }
diff --git a/src/full-codegen/ia32/full-codegen-ia32.cc b/src/full-codegen/ia32/full-codegen-ia32.cc
index f1945c8..760a818 100644
--- a/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -168,7 +168,8 @@
       __ push(edi);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -227,7 +228,8 @@
   // Register holding this function and new target are both trashed in case we
   // bailout here. But since that can happen only when new target is not used
   // and we allocate a context, the value of |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -289,7 +291,8 @@
   }
 
   // Visit the declarations and body.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -302,7 +305,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     ExternalReference stack_limit =
         ExternalReference::address_of_stack_limit(isolate());
@@ -369,11 +373,11 @@
   EmitProfilingCounterReset();
 
   __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -423,6 +427,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -677,7 +684,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ cmp(eax, isolate()->factory()->true_value());
     Split(equal, if_true, if_false, NULL);
@@ -708,14 +715,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
@@ -734,7 +740,7 @@
         __ mov(ContextOperand(esi, variable->index()),
                Immediate(isolate()->factory()->the_hole_value()));
         // No write barrier since the hole value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+        PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -755,6 +761,7 @@
       __ push(
           Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -798,7 +805,7 @@
                                 kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET,
                                 OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -808,6 +815,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -838,7 +846,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -887,7 +895,7 @@
 
     Label skip;
     __ jmp(&skip, Label::kNear);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ cmp(eax, isolate()->factory()->true_value());
     __ j(not_equal, &next_test);
     __ Drop(1);
@@ -915,12 +923,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -953,15 +961,13 @@
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ push(eax);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so will always take the slow path.
   Label call_runtime, use_cache, fixed_array;
   __ CheckEnumCache(&call_runtime);
 
@@ -972,7 +978,7 @@
   __ bind(&call_runtime);
   __ push(eax);
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
   __ j(not_equal, &fixed_array);
@@ -1008,7 +1014,7 @@
   __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1046,7 +1052,7 @@
   __ push(ecx);  // Enumerable.
   __ push(ebx);  // Current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ cmp(eax, isolate()->factory()->undefined_value());
   __ j(equal, loop_statement.continue_label());
   __ mov(ebx, eax);
@@ -1058,11 +1064,11 @@
   // Perform the assignment as if via '='.
   { EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1079,7 +1085,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1207,16 +1213,11 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ cmp(eax, isolate()->factory()->the_hole_value());
       __ j(not_equal, done);
-      if (local->mode() == CONST_LEGACY) {
-        __ mov(eax, isolate()->factory()->undefined_value());
-      } else {  // LET || CONST
-        __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ push(Immediate(var->name()));
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ jmp(done);
   }
@@ -1242,7 +1243,7 @@
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1274,10 +1275,6 @@
           // binding in harmony mode.
           __ push(Immediate(var->name()));
           __ CallRuntime(Runtime::kThrowReferenceError);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ mov(eax, isolate()->factory()->undefined_value());
         }
         __ bind(&done);
         context()->Plug(eax);
@@ -1356,8 +1353,9 @@
     __ mov(edx, Immediate(Smi::FromInt(flags)));
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in eax.
@@ -1393,7 +1391,7 @@
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
             }
@@ -1421,7 +1419,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1477,7 +1475,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1550,7 +1548,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1580,7 +1578,8 @@
     Handle<Code> ic =
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1600,7 +1599,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1615,7 +1615,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1680,23 +1679,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1715,7 +1718,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1727,7 +1730,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(eax);
       break;
     case NAMED_PROPERTY:
@@ -1756,21 +1759,26 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ jmp(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, eax holds the generator object.
   __ RecordGeneratorContinuation();
-  __ pop(ebx);
-  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
-  __ j(not_equal, &resume);
-  __ push(result_register());
+  __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
+  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOffset));
+  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
+  __ j(less, &resume);
+  __ Push(result_register());
+  __ j(greater, &exception);
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
@@ -1786,8 +1794,7 @@
   __ j(equal, &post_runtime);
   __ push(eax);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ mov(context_register(),
-         Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -1796,101 +1803,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in eax, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // ebx will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(ebx);
-
-  // Store input value into generator object.
-  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
-  __ mov(ecx, result_register());
-  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
-                      kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
-  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
-
-  // Push receiver.
-  __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(edx,
-         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ mov(ecx, isolate()->factory()->the_hole_value());
-  Label push_argument_holes, push_frame;
-  __ bind(&push_argument_holes);
-  __ sub(edx, Immediate(Smi::FromInt(1)));
-  __ j(carry, &push_frame);
-  __ push(ecx);
-  __ jmp(&push_argument_holes);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ call(&resume_frame);
-  __ jmp(&done);
-  __ bind(&resume_frame);
-  __ push(ebp);  // Caller's frame pointer.
-  __ mov(ebp, esp);
-  __ push(esi);  // Callee's context.
-  __ push(edi);  // Callee's JS Function.
-
-  // Load the operand stack size.
-  __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
-  __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
-  __ SmiUntag(edx);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ cmp(edx, Immediate(0));
-    __ j(not_zero, &slow_resume);
-    __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(ecx);
-    __ add(edx, ecx);
-    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
-           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-    __ jmp(edx);
-    __ bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label push_operand_holes, call_resume;
-  __ bind(&push_operand_holes);
-  __ sub(edx, Immediate(1));
-  __ j(carry, &call_resume);
-  __ push(ecx);
-  __ jmp(&push_operand_holes);
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  __ push(ebx);
-  __ push(result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ Abort(kGeneratorFailedToResume);
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperand(MemOperand operand) {
   OperandStackDepthIncrement(1);
   __ Push(operand);
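
Note on the deletions above: full-codegen no longer reconstructs a suspended generator frame by hand, so the per-architecture EmitGeneratorResume bodies are removed wholesale. Resumption is instead driven from the continuation point inside the generator body, which dispatches on a resume mode stored on the generator object (see the matching mips hunk further below, which compares against JSGeneratorObject::kNext and kThrow). A minimal sketch of the modes that dispatch distinguishes; the names are inferred from this patch, kReturn is assumed from the RETURN mode in the removed code, and the real declaration lives in JSGeneratorObject outside this diff:

    // Sketch only, not the actual V8 declaration.
    enum ResumeMode { kNext, kReturn, kThrow };
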
@@ -1910,7 +1822,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate,
+              NO_ALLOCATION_FLAGS);
   __ jmp(&done_allocate, Label::kNear);
 
   __ bind(&allocate);
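
Every __ Allocate call site in this patch swaps TAG_OBJECT for NO_ALLOCATION_FLAGS, which suggests that returning a tagged pointer became the allocator's default rather than an opt-in flag. A hedged sketch of the flag set as these call sites assume it; only NO_ALLOCATION_FLAGS is confirmed by the diff, and the other enumerators are illustrative:

    enum AllocationFlags {
      NO_ALLOCATION_FLAGS = 0,       // default; result is a tagged pointer
      RESULT_CONTAINS_TOP = 1 << 0,  // illustrative
      PRETENURE = 1 << 1             // illustrative
    };
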
@@ -2232,8 +2145,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(Immediate(var->name()));
@@ -2255,25 +2167,6 @@
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
 
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ push(eax);
-      __ push(esi);
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackLocal() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, ecx);
-      __ mov(edx, location);
-      __ cmp(edx, isolate()->factory()->the_hole_value());
-      __ j(not_equal, &skip, Label::kNear);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2296,7 +2189,7 @@
   PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
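
Throughout this patch, bailout points trade the bare TOS_REG and NO_REGISTERS enumerators for a scoped BailoutState type. Assuming the two states visible here are the only ones (the actual definition belongs to FullCodeGenerator and is not part of this diff), the declaration would be no more than:

    enum class BailoutState { NO_REGISTERS, TOS_REGISTER };

The scoped form keeps the enumerators out of the surrounding scope and makes call sites such as PrepareForBailoutForId(id, BailoutState::TOS_REGISTER) read unambiguously.
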
 
@@ -2342,44 +2235,7 @@
       CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), result_register());
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      PopOperand(LoadDescriptor::ReceiverRegister());              // Object.
-      __ Move(LoadDescriptor::NameRegister(), result_register());  // Key.
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
 
@@ -2400,7 +2256,7 @@
   if (callee->IsVariableProxy()) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2412,7 +2268,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     PushOperand(Operand(esp, 0));
     __ mov(Operand(esp, kPointerSize), eax);
@@ -2447,6 +2304,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2471,7 +2329,8 @@
   __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
   __ mov(LoadDescriptor::NameRegister(), eax);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   PushOperand(Operand(esp, 0));
@@ -2503,6 +2362,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2522,7 +2382,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
@@ -2543,15 +2403,12 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
+  RestoreContext();
   context()->DropAndPlug(1, eax);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(esp, arg_count * kPointerSize));
@@ -2568,6 +2425,9 @@
   // Push the start position of the scope the call resides in.
   __ push(Immediate(Smi::FromInt(scope()->start_position())));
 
+  // Push the source position of the eval call.
+  __ push(Immediate(Smi::FromInt(expr->position())));
+
   // Do the runtime call.
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
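
With the extra push, Runtime::kResolvePossiblyDirectEval receives one more argument than before. A sketch of the operands the runtime now sees, in push order, inferred from this function plus the callee copy pushed by EmitPossiblyEvalCall below; the runtime-side handler is not part of this diff:

    //   callee (copy of the function being called)
    //   first argument, or undefined if there is none
    //   the enclosing function
    //   language mode (Smi)
    //   start position of the enclosing scope (Smi)
    //   source position of the eval call (Smi)   <- new in this patch
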
@@ -2590,7 +2450,7 @@
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperand(eax);  // Function.
     PushOperand(edx);  // Receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2614,7 +2474,7 @@
 
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
   // to resolve the function we need to call.  Then we call the resolved
   // function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2630,12 +2490,12 @@
   // Push a copy of the function (found below the arguments) and
   // resolve eval.
   __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   SetCallPosition(expr);
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2645,8 +2505,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, eax);
 }
 
@@ -2685,9 +2544,8 @@
   CallConstructStub stub(isolate());
   __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(eax);
 }
 
@@ -2728,9 +2586,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(eax);
 }
 
@@ -3124,7 +2980,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to edi.
   int const argc = args->length() - 2;
   __ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3132,8 +2988,7 @@
   __ mov(eax, Immediate(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, eax);
 }
@@ -3187,13 +3042,6 @@
   context()->Plug(eax);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ mov(eax, NativeContextOperand());
-  __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
-  context()->Plug(eax);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3212,7 +3060,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ mov(ebx, NativeContextOperand());
   __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
   __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
@@ -3253,9 +3102,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3340,7 +3187,8 @@
                         &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
           __ mov(eax, isolate()->factory()->true_value());
         } else {
@@ -3348,7 +3196,8 @@
         }
         __ jmp(&done, Label::kNear);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
           __ mov(eax, isolate()->factory()->false_value());
         } else {
@@ -3447,9 +3296,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3504,7 +3353,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3552,7 +3401,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(eax);
         }
        // For all contexts except EffectContext we have the result on
@@ -3564,7 +3414,8 @@
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(eax);
       }
       break;
@@ -3574,7 +3425,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3613,7 +3464,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         // Result is on the stack
         if (!context()->IsEffect()) {
@@ -3709,7 +3560,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3729,7 +3579,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
@@ -3737,6 +3588,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       PopOperand(edx);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
@@ -3748,6 +3600,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cc = CompareIC::ComputeCondition(op);
       PopOperand(edx);
 
diff --git a/src/full-codegen/mips/full-codegen-mips.cc b/src/full-codegen/mips/full-codegen-mips.cc
index f329a23..e61c3e4 100644
--- a/src/full-codegen/mips/full-codegen-mips.cc
+++ b/src/full-codegen/mips/full-codegen-mips.cc
@@ -186,7 +186,8 @@
       __ push(a1);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -242,7 +243,8 @@
   // Registers holding this function and new target are both trashed in case we
   // bail out here. But since that can happen only when new target is not used
   // and we allocate a context, the value of |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -305,7 +307,8 @@
 
   // Visit the declarations and body unless there is an illegal
   // redeclaration.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -318,7 +321,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(&ok, hs, sp, Operand(at));
@@ -397,11 +401,11 @@
   EmitProfilingCounterReset();
 
   __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -459,6 +463,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
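
The new RestoreContext helper centralizes the context-register reload that previously appeared inline after every call that can clobber cp; the remaining hunks in this file replace the raw load with the helper. For a representative call site:

    // Before:
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // After:
    RestoreContext();

Each architecture supplies its own one-line body (lw here, ld in the mips64 hunk below, mov from ebp in the ia32 hunks above), so the call sites stay identical across ports.
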
@@ -724,7 +731,7 @@
 
   Label skip;
   if (should_normalize) __ Branch(&skip);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ LoadRoot(t0, Heap::kTrueValueRootIndex);
     Split(eq, a0, Operand(t0), if_true, if_false, NULL);
@@ -758,15 +765,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(),
-                    zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
@@ -785,7 +790,7 @@
           __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
           __ sw(at, ContextMemOperand(cp, variable->index()));
           // No write barrier since the_hole_value is in old space.
-          PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+          PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -807,6 +812,7 @@
       __ Push(a2, a0);
       __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -852,7 +858,7 @@
                                 kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET,
                                 OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -864,6 +870,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -895,7 +902,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -945,7 +952,7 @@
 
     Label skip;
     __ Branch(&skip);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ LoadRoot(at, Heap::kTrueValueRootIndex);
     __ Branch(&next_test, ne, v0, Operand(at));
     __ Drop(1);
@@ -972,12 +979,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -1013,15 +1020,13 @@
   __ CallStub(&stub);
   __ mov(a0, v0);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ push(a0);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so will always take the slow path.
   Label call_runtime;
   __ CheckEnumCache(&call_runtime);
 
@@ -1035,7 +1040,7 @@
   __ bind(&call_runtime);
   __ push(a0);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
 
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
@@ -1073,7 +1078,7 @@
   __ Push(a1, v0);  // Smi and array
   __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
   __ Push(a1);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ li(a0, Operand(Smi::FromInt(0)));
   __ Push(a0);  // Initial index.
 
@@ -1114,7 +1119,7 @@
   // just skip it.
   __ Push(a1, a3);  // Enumerable and current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ mov(a3, result_register());
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   __ Branch(loop_statement.continue_label(), eq, a3, Operand(at));
@@ -1126,11 +1131,11 @@
   // Perform the assignment as if via '='.
   { EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1149,7 +1154,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1278,19 +1283,13 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
       __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-      if (local->mode() == CONST_LEGACY) {
-        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-        __ Movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
-      } else {  // LET || CONST
-        __ Branch(done, ne, at, Operand(zero_reg));
-        __ li(a0, Operand(var->name()));
-        __ push(a0);
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ Branch(done, ne, at, Operand(zero_reg));
+      __ li(a0, Operand(var->name()));
+      __ push(a0);
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ Branch(done);
   }
@@ -1314,7 +1313,7 @@
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1348,11 +1347,6 @@
           __ push(a0);
           __ CallRuntime(Runtime::kThrowReferenceError);
           __ bind(&done);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-          __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
         }
         context()->Plug(v0);
         break;
@@ -1424,8 +1418,9 @@
   } else {
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in v0.
@@ -1462,7 +1457,7 @@
             __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1496,7 +1491,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1553,7 +1548,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1625,7 +1620,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1657,7 +1652,8 @@
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1677,7 +1673,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1692,7 +1689,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1761,23 +1757,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1797,7 +1797,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1809,7 +1809,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(v0);
       break;
     case NAMED_PROPERTY:
@@ -1838,20 +1838,24 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ jmp(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, v0 holds the generator object.
   __ RecordGeneratorContinuation();
-  __ pop(a1);
-  __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
-  __ push(result_register());
+  __ lw(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
+  __ lw(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOffset));
+  __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
+  __ Push(result_register());
+  __ Branch(&exception, eq, a1,
+            Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
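
The rewritten continuation amounts to the following dispatch, restated from the hunk above rather than adding behavior; input stands for the value handed to the resume operation, and the generator object arrives in v0:

    // if (resume_mode == kNext)   resume the suspended body with input
    // if (resume_mode == kThrow)  Runtime::kThrow(input)   // never returns
    // otherwise (a return resume) build {value: input, done: true} and return
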
@@ -1866,7 +1870,7 @@
   __ Branch(&post_runtime, eq, sp, Operand(a1));
   __ push(v0);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -1875,103 +1879,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in a0, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // a1 will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(a1);
-
-  // Store input value into generator object.
-  __ sw(result_register(),
-        FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
-  __ mov(a2, result_register());
-  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
-                      kRAHasBeenSaved, kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
-  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-
-  // Load receiver and store as the first argument.
-  __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
-  __ push(a2);
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a3,
-        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
-  Label push_argument_holes, push_frame;
-  __ bind(&push_argument_holes);
-  __ Subu(a3, a3, Operand(Smi::FromInt(1)));
-  __ Branch(&push_frame, lt, a3, Operand(zero_reg));
-  __ push(a2);
-  __ jmp(&push_argument_holes);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ Call(&resume_frame);
-  __ jmp(&done);
-  __ bind(&resume_frame);
-  // ra = return address.
-  // fp = caller's frame pointer.
-  // cp = callee's context,
-  // t0 = callee's JS function.
-  __ PushStandardFrame(t0);
-
-  // Load the operand stack size.
-  __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-  __ lw(a3, FieldMemOperand(a3, FixedArray::kLengthOffset));
-  __ SmiUntag(a3);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ Branch(&slow_resume, ne, a3, Operand(zero_reg));
-    __ lw(a3, FieldMemOperand(t0, JSFunction::kCodeEntryOffset));
-    __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(a2);
-    __ Addu(a3, a3, Operand(a2));
-    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-    __ Jump(a3);
-    __ bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label push_operand_holes, call_resume;
-  __ bind(&push_operand_holes);
-  __ Subu(a3, a3, Operand(1));
-  __ Branch(&call_resume, lt, a3, Operand(zero_reg));
-  __ push(a2);
-  __ Branch(&push_operand_holes);
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  DCHECK(!result_register().is(a1));
-  __ Push(a1, result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ stop("not-reached");
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
   OperandStackDepthIncrement(2);
   __ Push(reg1, reg2);
@@ -2006,7 +1913,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate,
+              NO_ALLOCATION_FLAGS);
   __ jmp(&done_allocate);
 
   __ bind(&allocate);
@@ -2333,8 +2241,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2356,24 +2263,6 @@
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
 
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ li(a0, Operand(var->name()));
-      __ Push(v0, cp, a0);  // Context and name.
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, a1);
-      __ lw(a2, location);
-      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-      __ Branch(&skip, ne, a2, Operand(at));
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2397,7 +2286,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
 }
 
@@ -2448,44 +2337,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), v0);
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), v0);
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
 }
 
@@ -2506,7 +2358,7 @@
   if (callee->IsVariableProxy()) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2519,7 +2371,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     __ lw(at, MemOperand(sp, 0));
     PushOperand(at);
@@ -2556,6 +2409,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ sw(v0, MemOperand(sp, kPointerSize));
@@ -2580,7 +2434,8 @@
   __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
   __ Move(LoadDescriptor::NameRegister(), v0);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   __ lw(at, MemOperand(sp, 0));
@@ -2614,6 +2469,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ sw(v0, MemOperand(sp, kPointerSize));
@@ -2633,7 +2489,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Record source position of the IC call.
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
@@ -2655,31 +2511,33 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, v0);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
-  // t3: copy of the first argument or undefined if it doesn't exist.
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
+  // t4: copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
-    __ lw(t3, MemOperand(sp, arg_count * kPointerSize));
+    __ lw(t4, MemOperand(sp, arg_count * kPointerSize));
   } else {
-    __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
   }
 
-  // t2: the receiver of the enclosing function.
-  __ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  // t3: the enclosing function.
+  __ lw(t3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 
-  // t1: the language mode.
-  __ li(t1, Operand(Smi::FromInt(language_mode())));
+  // t2: the language mode.
+  __ li(t2, Operand(Smi::FromInt(language_mode())));
 
-  // t0: the start position of the scope the calls resides in.
-  __ li(t0, Operand(Smi::FromInt(scope()->start_position())));
+  // t1: the start position of the scope the call resides in.
+  __ li(t1, Operand(Smi::FromInt(scope()->start_position())));
+
+  // t0: the source position of the eval call.
+  __ li(t0, Operand(Smi::FromInt(expr->position())));
 
   // Do the runtime call.
-  __ Push(t3, t2, t1, t0);
+  __ Push(t4, t3, t2, t1, t0);
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
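
On mips, each helper register shifts up by one (t3 to t4, t2 to t3, t1 to t2, t0 to t1) so that t0 is free to carry the new source-position argument, and all five operands go out in a single push. The resulting order, restated from the hunk above:

    //   t4: first argument or undefined
    //   t3: the enclosing function
    //   t2: language mode (Smi)
    //   t1: scope start position (Smi)
    //   t0: source position of the eval call (Smi)   <- new in this patch
    __ Push(t4, t3, t2, t1, t0);
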
 
@@ -2701,7 +2559,7 @@
     __ Push(callee->name());
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperands(v0, v1);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
     // function and receiver and have the slow path jump around this
@@ -2728,7 +2586,7 @@
 
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
   // to resolve the function we need to call.  Then we call the resolved
   // function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2744,12 +2602,12 @@
   // resolve eval.
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ push(a1);
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
   // Record source position for debugger.
   SetCallPosition(expr);
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2759,8 +2617,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, v0);
 }
 
@@ -2799,9 +2656,8 @@
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(v0);
 }
 
@@ -2844,9 +2700,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(v0);
 }
 
@@ -3250,7 +3104,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to a1.
   int const argc = args->length() - 2;
   __ lw(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3258,8 +3112,7 @@
   __ li(a0, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, v0);
 }
@@ -3310,12 +3163,6 @@
   context()->Plug(v0);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
-  context()->Plug(v0);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3335,7 +3182,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
   __ Pop(a2, a3);
   __ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
@@ -3376,9 +3224,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3462,12 +3308,14 @@
                         &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(v0, Heap::kTrueValueRootIndex);
         if (context()->IsStackValue()) __ push(v0);
         __ jmp(&done);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(v0, Heap::kFalseValueRootIndex);
         if (context()->IsStackValue()) __ push(v0);
         __ bind(&done);
@@ -3567,9 +3415,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3620,7 +3468,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3666,7 +3514,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(v0);
         }
        // For all contexts except EffectContext we have the result on
@@ -3677,7 +3526,8 @@
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(v0);
       }
       break;
@@ -3688,7 +3538,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3728,7 +3578,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3825,7 +3675,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3845,7 +3694,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(t0, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
@@ -3853,6 +3703,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       __ mov(a0, result_register());
       PopOperand(a1);
       InstanceOfStub stub(isolate());
@@ -3865,6 +3716,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cc = CompareIC::ComputeCondition(op);
       __ mov(a0, result_register());
       PopOperand(a1);
diff --git a/src/full-codegen/mips64/full-codegen-mips64.cc b/src/full-codegen/mips64/full-codegen-mips64.cc
index 681abd1..a93489d 100644
--- a/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -185,7 +185,8 @@
       __ push(a1);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -241,7 +242,8 @@
   // Registers holding this function and new target are both trashed in case we
   // bail out here. But since that can happen only when new target is not used
   // and we allocate a context, the value of |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -302,7 +304,8 @@
   }
 
   // Visit the declarations and body.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -315,7 +318,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(&ok, hs, sp, Operand(at));
@@ -396,11 +400,11 @@
   EmitProfilingCounterReset();
 
   __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -458,6 +462,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
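+  // Reload the context register (cp) from the standard frame slot; call
+  // sites use this helper after stub and runtime calls that may clobber cp.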
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -723,7 +730,7 @@
 
   Label skip;
   if (should_normalize) __ Branch(&skip);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ LoadRoot(a4, Heap::kTrueValueRootIndex);
     Split(eq, a0, Operand(a4), if_true, if_false, NULL);
@@ -757,15 +764,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
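+      // Global and unallocated bindings never require hole initialization
+      // here, so undefined is always the correct initial value.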
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(),
-                    zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
@@ -784,7 +789,7 @@
           __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
           __ sd(at, ContextMemOperand(cp, variable->index()));
           // No write barrier since the_hole_value is in old space.
-          PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+          PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -806,6 +811,7 @@
       __ Push(a2, a0);
       __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -851,7 +857,7 @@
                                 kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET,
                                 OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -863,6 +869,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -894,7 +901,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -944,7 +951,7 @@
 
     Label skip;
     __ Branch(&skip);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ LoadRoot(at, Heap::kTrueValueRootIndex);
     __ Branch(&next_test, ne, v0, Operand(at));
     __ Drop(1);
@@ -971,12 +978,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -1013,15 +1020,13 @@
   __ CallStub(&stub);
   __ mov(a0, v0);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ push(a0);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check it or to get the property
+  // names in a fixed array. Note: proxies never have an enum cache, so they
+  // always take the slow path.
   Label call_runtime;
   __ CheckEnumCache(&call_runtime);
 
@@ -1035,7 +1040,7 @@
   __ bind(&call_runtime);
   __ push(a0);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
 
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
@@ -1073,7 +1078,7 @@
   __ Push(a1, v0);  // Smi and array
   __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
   __ Push(a1);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ li(a0, Operand(Smi::FromInt(0)));
   __ Push(a0);  // Initial index.
 
@@ -1115,7 +1120,7 @@
   // just skip it.
   __ Push(a1, a3);  // Enumerable and current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ mov(a3, result_register());
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
   __ Branch(loop_statement.continue_label(), eq, a3, Operand(at));
@@ -1127,11 +1132,11 @@
   // Perform the assignment as if via '='.
   { EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1150,7 +1155,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1279,19 +1284,13 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ ld(v0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
       __ dsubu(at, v0, at);  // Sub as compare: at == 0 on eq.
-      if (local->mode() == CONST_LEGACY) {
-        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-        __ Movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
-      } else {  // LET || CONST
-        __ Branch(done, ne, at, Operand(zero_reg));
-        __ li(a0, Operand(var->name()));
-        __ push(a0);
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ Branch(done, ne, at, Operand(zero_reg));
+      __ li(a0, Operand(var->name()));
+      __ push(a0);
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ Branch(done);
   }
@@ -1315,7 +1314,7 @@
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1349,11 +1348,6 @@
           __ push(a0);
           __ CallRuntime(Runtime::kThrowReferenceError);
           __ bind(&done);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-          __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
         }
         context()->Plug(v0);
         break;
@@ -1425,8 +1419,9 @@
   } else {
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
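+    // The stub call may clobber the context register, so reload it below.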
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in v0.
@@ -1463,7 +1458,7 @@
             __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1497,7 +1492,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1554,7 +1549,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1626,7 +1621,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1658,7 +1653,8 @@
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1678,7 +1674,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1693,7 +1690,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1762,23 +1758,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1798,7 +1798,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1810,7 +1810,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(v0);
       break;
     case NAMED_PROPERTY:
@@ -1839,20 +1839,24 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ jmp(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, v0 holds the generator object.
   __ RecordGeneratorContinuation();
-  __ pop(a1);
-  __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
-  __ push(result_register());
+  __ ld(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
+  __ ld(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOffset));
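+  // Dispatch on the resume mode: kNext resumes execution, kThrow re-throws
+  // the input value, and kReturn boxes the input in an iterator result and
+  // returns it.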
+  __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
+  __ Push(result_register());
+  __ Branch(&exception, eq, a1,
+            Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
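+  // Runtime::kThrow does not return; control unwinds to an exception handler.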
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
@@ -1867,7 +1871,7 @@
   __ Branch(&post_runtime, eq, sp, Operand(a1));
   __ push(v0);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -1876,105 +1880,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in a0, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // a1 will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(a1);
-
-  // Store input value into generator object.
-  __ sd(result_register(),
-        FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
-  __ mov(a2, result_register());
-  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
-                      kRAHasBeenSaved, kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
-  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-
-  // Load receiver and store as the first argument.
-  __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
-  __ push(a2);
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
-  // The argument count is stored as int32_t on 64-bit platforms.
-  // TODO(plind): Smi on 32-bit platforms.
-  __ lw(a3,
-        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
-  Label push_argument_holes, push_frame;
-  __ bind(&push_argument_holes);
-  __ Dsubu(a3, a3, Operand(1));
-  __ Branch(&push_frame, lt, a3, Operand(zero_reg));
-  __ push(a2);
-  __ jmp(&push_argument_holes);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ Call(&resume_frame);
-  __ jmp(&done);
-  __ bind(&resume_frame);
-  // ra = return address.
-  // fp = caller's frame pointer.
-  // cp = callee's context,
-  // a4 = callee's JS function.
-  __ PushStandardFrame(a4);
-
-  // Load the operand stack size.
-  __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-  __ ld(a3, FieldMemOperand(a3, FixedArray::kLengthOffset));
-  __ SmiUntag(a3);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ Branch(&slow_resume, ne, a3, Operand(zero_reg));
-    __ ld(a3, FieldMemOperand(a4, JSFunction::kCodeEntryOffset));
-    __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(a2);
-    __ Daddu(a3, a3, Operand(a2));
-    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-    __ Jump(a3);
-    __ bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label push_operand_holes, call_resume;
-  __ bind(&push_operand_holes);
-  __ Dsubu(a3, a3, Operand(1));
-  __ Branch(&call_resume, lt, a3, Operand(zero_reg));
-  __ push(a2);
-  __ Branch(&push_operand_holes);
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  DCHECK(!result_register().is(a1));
-  __ Push(a1, result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ stop("not-reached");
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
   OperandStackDepthIncrement(2);
   __ Push(reg1, reg2);
@@ -2009,7 +1914,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate,
+              NO_ALLOCATION_FLAGS);
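+  // The allocator leaves a tagged heap object in v0; no manual tagging of the
+  // result is needed.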
   __ jmp(&done_allocate);
 
   __ bind(&allocate);
@@ -2335,8 +2241,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       __ Push(var->name());
       __ Push(v0);
@@ -2357,24 +2262,6 @@
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
 
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ li(a0, Operand(var->name()));
-      __ Push(v0, cp, a0);  // Context and name.
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, a1);
-      __ ld(a2, location);
-      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-      __ Branch(&skip, ne, a2, Operand(at));
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2398,7 +2285,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
 }
 
@@ -2449,44 +2336,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), v0);
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), v0);
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(v0);
 }
 
@@ -2507,7 +2357,7 @@
   if (callee->IsVariableProxy()) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2520,7 +2370,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     __ ld(at, MemOperand(sp, 0));
     PushOperand(at);
@@ -2557,6 +2408,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
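+  // A deopt at this point expects the loaded value in the result register.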
 
   // Replace home_object with target function.
   __ sd(v0, MemOperand(sp, kPointerSize));
@@ -2581,7 +2433,8 @@
   __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
   __ Move(LoadDescriptor::NameRegister(), v0);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   __ ld(at, MemOperand(sp, 0));
@@ -2615,6 +2468,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ sd(v0, MemOperand(sp, kPointerSize));
@@ -2634,7 +2488,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Record source position of the IC call.
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
@@ -2656,13 +2510,12 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, v0);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   // a6: copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ ld(a6, MemOperand(sp, arg_count * kPointerSize));
@@ -2679,8 +2532,11 @@
   // a1: the start position of the scope the call resides in.
   __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
 
+  // a0: the source position of the eval call.
+  __ li(a0, Operand(Smi::FromInt(expr->position())));
+
   // Do the runtime call.
-  __ Push(a6, a5, a4, a1);
+  __ Push(a6, a5, a4, a1, a0);
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
 
@@ -2702,7 +2558,7 @@
     __ Push(callee->name());
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperands(v0, v1);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
     // function and receiver and have the slow path jump around this
@@ -2729,7 +2585,7 @@
 
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
   // to resolve the function we need to call.  Then we call the resolved
   // function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2745,12 +2601,12 @@
   // resolve eval.
   __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ push(a1);
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
   // Record source position for debugger.
   SetCallPosition(expr);
   __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2760,8 +2616,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, v0);
 }
 
@@ -2800,9 +2655,8 @@
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(v0);
 }
 
@@ -2845,9 +2699,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(v0);
 }
 
@@ -3252,7 +3104,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to a1.
   int const argc = args->length() - 2;
   __ ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3260,8 +3112,7 @@
   __ li(a0, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, v0);
 }
@@ -3312,12 +3163,6 @@
   context()->Plug(v0);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
-  context()->Plug(v0);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3337,7 +3182,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
   __ Pop(a2, a3);
   __ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
@@ -3378,9 +3224,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3465,12 +3309,14 @@
                         &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(v0, Heap::kTrueValueRootIndex);
         if (context()->IsStackValue()) __ push(v0);
         __ jmp(&done);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(v0, Heap::kFalseValueRootIndex);
         if (context()->IsStackValue()) __ push(v0);
         __ bind(&done);
@@ -3570,9 +3416,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3623,7 +3469,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3669,7 +3515,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(v0);
         }
         // For all contexts except EffectContext we have the result on
@@ -3680,7 +3527,8 @@
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(v0);
       }
       break;
@@ -3691,7 +3539,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3731,7 +3579,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3828,7 +3676,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3848,7 +3695,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(a4, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
@@ -3856,6 +3704,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       __ mov(a0, result_register());
       PopOperand(a1);
       InstanceOfStub stub(isolate());
@@ -3868,6 +3717,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cc = CompareIC::ComputeCondition(op);
       __ mov(a0, result_register());
       PopOperand(a1);
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/ppc/full-codegen-ppc.cc
index 301ccf5..50248c1 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -182,7 +182,8 @@
       __ push(r4);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -238,7 +239,8 @@
   // The registers holding this function and the new target are both trashed
   // if we bail out here. But since that can happen only when the new target
   // is not used and we allocate a context, |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -300,7 +302,8 @@
   }
 
   // Visit the declarations and body.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -313,7 +316,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
     __ cmpl(sp, ip);
@@ -390,11 +394,11 @@
   EmitProfilingCounterReset();
 
   __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -450,6 +454,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
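+  // Reload the context register (cp) from the standard frame slot after a
+  // stub or runtime call that may have clobbered it.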
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -690,7 +697,7 @@
 
   Label skip;
   if (should_normalize) __ b(&skip);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r3, ip);
@@ -723,15 +730,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
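+      // Such bindings never need hole initialization, so undefined is the
+      // correct initial value.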
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(),
-                    zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
@@ -750,7 +755,7 @@
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ StoreP(ip, ContextMemOperand(cp, variable->index()), r0);
         // No write barrier since the_hole_value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+        PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -771,6 +776,7 @@
       __ Push(r5, r3);
       __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -812,7 +818,7 @@
       __ RecordWriteContextSlot(cp, offset, result_register(), r5,
                                 kLRHasBeenSaved, kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -824,6 +830,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -855,7 +862,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -904,7 +911,7 @@
 
     Label skip;
     __ b(&skip);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r3, ip);
     __ bne(&next_test);
@@ -933,12 +940,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -971,15 +978,13 @@
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ push(r3);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check it or to get the property
+  // names in a fixed array. Note: proxies never have an enum cache, so they
+  // always take the slow path.
   Label call_runtime;
   __ CheckEnumCache(&call_runtime);
 
@@ -993,7 +998,7 @@
   __ bind(&call_runtime);
   __ push(r3);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
 
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
@@ -1035,7 +1040,7 @@
   __ Push(r4, r3);  // Smi and array
   __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
   __ Push(r4);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ LoadSmiLiteral(r3, Smi::FromInt(0));
   __ Push(r3);  // Initial index.
 
@@ -1079,7 +1084,7 @@
   // just skip it.
   __ Push(r4, r6);  // Enumerable and current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ mr(r6, r3);
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   __ cmp(r3, r0);
@@ -1093,11 +1098,11 @@
   {
     EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1116,7 +1121,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1246,17 +1251,12 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
       __ bne(done);
-      if (local->mode() == CONST_LEGACY) {
-        __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-      } else {  // LET || CONST
-        __ mov(r3, Operand(var->name()));
-        __ push(r3);
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ mov(r3, Operand(var->name()));
+      __ push(r3);
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ b(done);
   }
@@ -1280,7 +1280,7 @@
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1312,10 +1312,6 @@
           __ mov(r3, Operand(var->name()));
           __ push(r3);
           __ CallRuntime(Runtime::kThrowReferenceError);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
         }
         __ bind(&done);
         context()->Plug(r3);
@@ -1389,8 +1385,9 @@
   } else {
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in r3.
@@ -1426,7 +1423,7 @@
             __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1460,7 +1457,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1516,7 +1513,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1589,7 +1586,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1619,7 +1616,8 @@
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1639,7 +1637,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1654,7 +1653,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1723,23 +1721,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1757,7 +1759,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1769,7 +1771,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(r3);
       break;
     case NAMED_PROPERTY:
@@ -1798,21 +1800,26 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ b(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, r3 holds the generator object.
   __ RecordGeneratorContinuation();
-  __ pop(r4);
-  __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
-  __ bne(&resume);
-  __ push(result_register());
+  __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
+  __ LoadP(r3, FieldMemOperand(r3, JSGeneratorObject::kInputOffset));
+  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
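+  // With this ordering a single compare against kReturn classifies all three
+  // modes: less-than is kNext (resume), greater-than is kThrow (exception),
+  // and equal is kReturn (fall through to the iterator result).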
+  __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kReturn), r0);
+  __ blt(&resume);
+  __ Push(result_register());
+  __ bgt(&exception);
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
@@ -1829,7 +1836,7 @@
   __ beq(&post_runtime);
   __ push(r3);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -1838,120 +1845,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(
-    Expression* generator, Expression* value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in r3, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // r4 will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(r4);
-
-  // Store input value into generator object.
-  __ StoreP(result_register(),
-            FieldMemOperand(r4, JSGeneratorObject::kInputOffset), r0);
-  __ mr(r5, result_register());
-  __ RecordWriteField(r4, JSGeneratorObject::kInputOffset, r5, r6,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
-  __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
-
-  // Load receiver and store as the first argument.
-  __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
-  __ push(r5);
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadWordArith(
-      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
-  Label argument_loop, push_frame;
-#if V8_TARGET_ARCH_PPC64
-  __ cmpi(r6, Operand::Zero());
-  __ beq(&push_frame);
-#else
-  __ SmiUntag(r6, SetRC);
-  __ beq(&push_frame, cr0);
-#endif
-  __ mtctr(r6);
-  __ bind(&argument_loop);
-  __ push(r5);
-  __ bdnz(&argument_loop);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ b(&resume_frame, SetLK);
-  __ b(&done);
-  __ bind(&resume_frame);
-  // lr = return address.
-  // fp = caller's frame pointer.
-  // cp = callee's context,
-  // r7 = callee's JS function.
-  __ PushStandardFrame(r7);
-
-  // Load the operand stack size.
-  __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
-  __ LoadP(r6, FieldMemOperand(r6, FixedArray::kLengthOffset));
-  __ SmiUntag(r6, SetRC);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  Label call_resume;
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ bne(&slow_resume, cr0);
-    __ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
-    {
-      ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
-      if (FLAG_enable_embedded_constant_pool) {
-        __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
-      }
-      __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
-      __ SmiUntag(r5);
-      __ add(ip, ip, r5);
-      __ LoadSmiLiteral(r5,
-                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-      __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
-                r0);
-      __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-      __ Jump(ip);
-      __ bind(&slow_resume);
-    }
-  } else {
-    __ beq(&call_resume, cr0);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label operand_loop;
-  __ mtctr(r6);
-  __ bind(&operand_loop);
-  __ push(r5);
-  __ bdnz(&operand_loop);
-
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  DCHECK(!result_register().is(r4));
-  __ Push(r4, result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ stop("not-reached");
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
   OperandStackDepthIncrement(2);
   __ Push(reg1, reg2);
@@ -1987,7 +1880,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &allocate,
+              NO_ALLOCATION_FLAGS);
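+  // On failure Allocate branches to the &allocate slow path bound below;
+  // on success we fall through and branch to done_allocate.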
   __ b(&done_allocate);
 
   __ bind(&allocate);
@@ -2346,8 +2240,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2368,25 +2261,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ push(r3);
-      __ mov(r3, Operand(var->name()));
-      __ Push(cp, r3);  // Context and name.
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, r4);
-      __ LoadP(r5, location);
-      __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
-      __ bne(&skip);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2409,7 +2283,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r3);
 }
 
@@ -2454,44 +2328,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), r3);
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), r3);
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r3);
 }
 
@@ -2512,7 +2349,7 @@
     {
       StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2525,7 +2362,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     __ LoadP(r0, MemOperand(sp, 0));
     PushOperand(r0);
@@ -2562,6 +2400,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2585,7 +2424,8 @@
   __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
   __ Move(LoadDescriptor::NameRegister(), r3);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   __ LoadP(ip, MemOperand(sp, 0));
@@ -2619,6 +2459,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2638,7 +2479,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
@@ -2659,13 +2500,13 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, r3);
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   // r7: copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ LoadP(r7, MemOperand(sp, arg_count * kPointerSize), r0);
@@ -2682,8 +2523,11 @@
   // r4: the start position of the scope the call resides in.
   __ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
 
+  // r3: the source position of the eval call.
+  __ LoadSmiLiteral(r3, Smi::FromInt(expr->position()));
+
   // Do the runtime call.
-  __ Push(r7, r6, r5, r4);
+  __ Push(r7, r6, r5, r4, r3);
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
 
@@ -2704,7 +2548,7 @@
     __ Push(callee->name());
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperands(r3, r4);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2731,9 +2575,9 @@
 
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
+  // In a call to eval, we first call
+  // Runtime_ResolvePossiblyDirectEval to resolve the function we need
+  // to call.  Then we call the resolved function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
 
@@ -2748,12 +2592,12 @@
   // resolve eval.
   __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   __ push(r4);
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   // Record source position for debugger.
   SetCallPosition(expr);
@@ -2764,8 +2608,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, r3);
 }
 
@@ -2804,9 +2647,8 @@
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(r3);
 }
 
@@ -2849,9 +2691,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(r3);
 }
 
@@ -3230,7 +3070,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to r4.
   int const argc = args->length() - 2;
   __ LoadP(r4, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3238,8 +3078,7 @@
   __ mov(r3, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, r3);
 }
@@ -3291,12 +3130,6 @@
   context()->Plug(r3);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r3);
-  context()->Plug(r3);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3316,7 +3149,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
   __ Pop(r5, r6);
   __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
@@ -3357,9 +3191,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3439,12 +3271,14 @@
                         &materialize_true, &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(r3, Heap::kTrueValueRootIndex);
         if (context()->IsStackValue()) __ push(r3);
         __ b(&done);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(r3, Heap::kFalseValueRootIndex);
         if (context()->IsStackValue()) __ push(r3);
         __ bind(&done);
@@ -3544,9 +3378,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3598,7 +3432,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3645,7 +3479,8 @@
           EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(r3);
         }
         // For all contexts except EffectContext, we have the result on
@@ -3656,7 +3491,8 @@
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(r3);
       }
       break;
@@ -3666,7 +3502,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3705,7 +3541,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3804,7 +3640,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3824,7 +3659,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r3, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3832,6 +3668,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       PopOperand(r4);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
@@ -3843,6 +3680,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cond = CompareIC::ComputeCondition(op);
       PopOperand(r4);
 
diff --git a/src/full-codegen/s390/full-codegen-s390.cc b/src/full-codegen/s390/full-codegen-s390.cc
index 88bec4c..0d2107d 100644
--- a/src/full-codegen/s390/full-codegen-s390.cc
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -187,7 +187,8 @@
       __ push(r3);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, so clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -243,7 +244,8 @@
   // Registers holding this function and new target are both trashed in case we
   // bail out here. But since that can happen only when new target is not used
   // and we allocate a context, the value of |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -307,7 +309,8 @@
   }
 
   // Visit the declarations and body.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -320,7 +323,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
     __ CmpLogicalP(sp, ip);
@@ -392,11 +396,11 @@
   EmitProfilingCounterReset();
 
   __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -454,6 +458,10 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
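+  // After a call, cp may hold the callee's context; reload the caller's
+  // context from its frame slot.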
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
@@ -670,7 +678,7 @@
 
   Label skip;
   if (should_normalize) __ b(&skip);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ CompareRoot(r2, Heap::kTrueValueRootIndex);
     Split(eq, if_true, if_false, NULL);
@@ -700,15 +708,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(),
-                    zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
@@ -727,7 +733,7 @@
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ StoreP(ip, ContextMemOperand(cp, variable->index()));
         // No write barrier since the_hole_value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+        PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -748,6 +754,7 @@
       __ Push(r4, r2);
       __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -787,7 +794,7 @@
       __ RecordWriteContextSlot(cp, offset, result_register(), r4,
                                 kLRHasBeenSaved, kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -799,6 +806,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -827,7 +835,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -877,7 +885,7 @@
 
     Label skip;
     __ b(&skip);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ CompareRoot(r2, Heap::kTrueValueRootIndex);
     __ bne(&next_test);
     __ Drop(1);
@@ -905,12 +913,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
@@ -942,15 +950,13 @@
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ push(r2);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so will always take the slow path.
   Label call_runtime;
   __ CheckEnumCache(&call_runtime);
 
@@ -964,7 +970,7 @@
   __ bind(&call_runtime);
   __ push(r2);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
 
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
@@ -1005,7 +1011,7 @@
   __ Push(r3, r2);                         // Smi and array
   __ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
   __ Push(r3);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ LoadSmiLiteral(r2, Smi::FromInt(0));
   __ Push(r2);  // Initial index.
 
@@ -1049,7 +1055,7 @@
   // just skip it.
   __ Push(r3, r5);  // Enumerable and current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ LoadRR(r5, r2);
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   __ CmpP(r2, r0);
@@ -1063,11 +1069,11 @@
   {
     EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1086,7 +1092,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1210,17 +1216,12 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
       __ bne(done);
-      if (local->mode() == CONST_LEGACY) {
-        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-      } else {  // LET || CONST
-        __ mov(r2, Operand(var->name()));
-        __ push(r2);
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ mov(r2, Operand(var->name()));
+      __ push(r2);
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ b(done);
   }
@@ -1242,7 +1243,7 @@
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1274,10 +1275,6 @@
           __ mov(r2, Operand(var->name()));
           __ push(r2);
           __ CallRuntime(Runtime::kThrowReferenceError);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
         }
         __ bind(&done);
         context()->Plug(r2);
@@ -1348,8 +1345,9 @@
   } else {
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in r2.
@@ -1385,7 +1383,7 @@
             __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1419,7 +1417,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1475,7 +1473,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1547,7 +1545,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1577,7 +1575,8 @@
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions, it has two parts. The
@@ -1597,7 +1596,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1611,7 +1611,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1680,23 +1679,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1714,7 +1717,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1726,7 +1729,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(r2);
       break;
     case NAMED_PROPERTY:
@@ -1754,21 +1757,26 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ b(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, r2 holds the generator object.
   __ RecordGeneratorContinuation();
-  __ pop(r3);
-  __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
-  __ bne(&resume);
-  __ push(result_register());
+  __ LoadP(r3, FieldMemOperand(r2, JSGeneratorObject::kResumeModeOffset));
+  __ LoadP(r2, FieldMemOperand(r2, JSGeneratorObject::kInputOffset));
+  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
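+  // The asserts above order the modes as kNext < kReturn < kThrow, so a
+  // single compare against kReturn dispatches all three: blt resumes with
+  // the input value, bgt throws it, and the fall-through returns it.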
+  __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::kReturn), r0);
+  __ blt(&resume);
+  __ Push(result_register());
+  __ bgt(&exception);
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
@@ -1785,7 +1793,7 @@
   __ beq(&post_runtime);
   __ push(r2);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -1794,113 +1802,6 @@
   context()->Plug(result_register());
 }
 
-void FullCodeGenerator::EmitGeneratorResume(
-    Expression* generator, Expression* value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in r2, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // r3 will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(r3);
-
-  // Store input value into generator object.
-  __ StoreP(result_register(),
-            FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
-  __ LoadRR(r4, result_register());
-  __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r4, r5,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
-  __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
-
-  // Load receiver and store as the first argument.
-  __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
-  __ push(r4);
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadW(
-      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
-  Label argument_loop, push_frame;
-#if V8_TARGET_ARCH_S390X
-  __ CmpP(r5, Operand::Zero());
-  __ beq(&push_frame, Label::kNear);
-#else
-  __ SmiUntag(r5);
-  __ beq(&push_frame, Label::kNear);
-#endif
-  __ LoadRR(r0, r5);
-  __ bind(&argument_loop);
-  __ push(r4);
-  __ SubP(r0, Operand(1));
-  __ bne(&argument_loop);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ b(r14, &resume_frame);  // brasl
-  __ b(&done);
-  __ bind(&resume_frame);
-  // lr = return address.
-  // fp = caller's frame pointer.
-  // cp = callee's context,
-  // r6 = callee's JS function.
-  __ PushStandardFrame(r6);
-
-  // Load the operand stack size.
-  __ LoadP(r5, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
-  __ LoadP(r5, FieldMemOperand(r5, FixedArray::kLengthOffset));
-  __ SmiUntag(r5);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  Label call_resume;
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ bne(&slow_resume, Label::kNear);
-    __ LoadP(ip, FieldMemOperand(r6, JSFunction::kCodeEntryOffset));
-    __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(r4);
-    __ AddP(ip, ip, r4);
-    __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-    __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
-    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-    __ Jump(ip);
-    __ bind(&slow_resume);
-  } else {
-    __ beq(&call_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label operand_loop;
-  __ LoadRR(r0, r5);
-  __ bind(&operand_loop);
-  __ push(r4);
-  __ SubP(r0, Operand(1));
-  __ bne(&operand_loop);
-
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  DCHECK(!result_register().is(r3));
-  __ Push(r3, result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ stop("not-reached");
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
   OperandStackDepthIncrement(2);
   __ Push(reg1, reg2);
@@ -1936,7 +1837,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate,
+              NO_ALLOCATION_FLAGS);
   __ b(&done_allocate);
 
   __ bind(&allocate);
@@ -2292,8 +2194,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
@@ -2314,25 +2215,6 @@
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ push(r2);
-      __ mov(r2, Operand(var->name()));
-      __ Push(cp, r2);  // Context and name.
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, r3);
-      __ LoadP(r4, location);
-      __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
-      __ bne(&skip);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2354,7 +2236,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r2);
 }
 
@@ -2396,43 +2278,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(r2);
-}
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), r2);
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), r2);
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(r2);
 }
 
@@ -2451,7 +2297,7 @@
     {
       StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2464,7 +2310,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     __ LoadP(r1, MemOperand(sp, 0));
     PushOperand(r1);
@@ -2500,6 +2347,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ StoreP(r2, MemOperand(sp, kPointerSize));
@@ -2522,7 +2370,8 @@
   __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
   __ Move(LoadDescriptor::NameRegister(), r2);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   __ LoadP(ip, MemOperand(sp, 0));
@@ -2555,6 +2404,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ StoreP(r2, MemOperand(sp, kPointerSize));
@@ -2573,7 +2423,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
@@ -2594,12 +2444,12 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, r2);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   // r6: copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
@@ -2616,8 +2466,11 @@
   // r3: the start position of the scope the call resides in.
   __ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
 
+  // r2: the source position of the eval call.
+  __ LoadSmiLiteral(r2, Smi::FromInt(expr->position()));
+
   // Do the runtime call.
-  __ Push(r6, r5, r4, r3);
+  __ Push(r6, r5, r4, r3, r2);
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
 
@@ -2637,7 +2490,7 @@
     __ Push(callee->name());
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperands(r2, r3);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2663,9 +2516,9 @@
 }
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
+  // In a call to eval, we first call
+  // Runtime_ResolvePossiblyDirectEval to resolve the function we need
+  // to call.  Then we call the resolved function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
 
@@ -2680,12 +2533,12 @@
   // resolve eval.
   __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   __ push(r3);
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   // Record source position for debugger.
   SetCallPosition(expr);
@@ -2696,8 +2549,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, r2);
 }
 
@@ -2735,9 +2587,8 @@
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(r2);
 }
 
@@ -2779,9 +2630,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(r2);
 }
 
@@ -3146,7 +2995,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to r3.
   int const argc = args->length() - 2;
   __ LoadP(r3, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3154,8 +3003,7 @@
   __ mov(r2, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, r2);
 }
@@ -3202,12 +3050,6 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r2);
-  context()->Plug(r2);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3226,7 +3068,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
   __ Pop(r4, r5);
   __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
@@ -3265,9 +3108,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
@@ -3346,12 +3187,14 @@
                         &materialize_true, &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(r2, Heap::kTrueValueRootIndex);
         if (context()->IsStackValue()) __ push(r2);
         __ b(&done);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         __ LoadRoot(r2, Heap::kFalseValueRootIndex);
         if (context()->IsStackValue()) __ push(r2);
         __ bind(&done);
@@ -3450,9 +3293,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3504,7 +3347,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3551,7 +3394,8 @@
           EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(r2);
         }
         // For all contexts except EffectContext, we have the result on
@@ -3562,7 +3406,8 @@
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(r2);
       }
       break;
@@ -3572,7 +3417,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3611,7 +3456,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3705,7 +3550,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3725,7 +3569,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r2, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3733,6 +3578,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       PopOperand(r3);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
@@ -3744,6 +3590,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cond = CompareIC::ComputeCondition(op);
       PopOperand(r3);
 
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index 992e7fe..1ef9cee 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -166,7 +166,8 @@
       __ Push(rdi);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, so clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -222,7 +223,8 @@
   // Registers holding this function and new target are both trashed in case we
   // bail out here. But since that can happen only when new target is not used
   // and we allocate a context, the value of |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -286,7 +288,8 @@
 
   // Visit the declarations and body unless there is an illegal
   // redeclaration.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -299,7 +302,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
     __ j(above_equal, &ok, Label::kNear);
@@ -372,11 +376,11 @@
   }
   __ bind(&ok);
 
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -425,6 +429,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
+  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
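
// Hedged sketch, not part of this patch: the new RestoreContext() helper
// factors out the reload of the context register from its fixed frame slot
// after calls that may clobber it. Frame and CodeGen below are illustrative
// stand-ins for the frame layout and the masm_ pattern, not V8 types.
#include <cassert>

struct Frame {
  const void* context_slot;  // plays the role of kContextOffset in the frame
};

struct CodeGen {
  const Frame* frame;
  const void* context_register;  // plays the role of rsi / esi

  void CallOut() { context_register = nullptr; }  // a call trashes the register
  void RestoreContext() { context_register = frame->context_slot; }
};

int main() {
  Frame frame{&frame};
  CodeGen cg{&frame, frame.context_slot};
  cg.CallOut();
  cg.RestoreContext();
  assert(cg.context_register == frame.context_slot);
  return 0;
}
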
@@ -689,7 +696,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ CompareRoot(rax, Heap::kTrueValueRootIndex);
     Split(equal, if_true, if_false, NULL);
@@ -720,15 +727,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(),
-                    zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
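
// Hedged sketch, not part of this patch: the hole_init predicate above now
// covers only LET and CONST, so only those bindings get the-hole (TDZ)
// initialization; CONST_LEGACY is dropped throughout this diff. VariableMode
// here is an assumed stand-in for V8's enum.
#include <cassert>

enum VariableMode { VAR, LET, CONST, CONST_LEGACY };

bool RequiresHoleInit(VariableMode mode) {
  return mode == LET || mode == CONST;  // CONST_LEGACY no longer qualifies
}

int main() {
  assert(RequiresHoleInit(LET) && RequiresHoleInit(CONST));
  assert(!RequiresHoleInit(VAR) && !RequiresHoleInit(CONST_LEGACY));
  return 0;
}
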
@@ -747,7 +752,7 @@
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movp(ContextOperand(rsi, variable->index()), kScratchRegister);
         // No write barrier since the hole value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+        PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -767,6 +772,7 @@
       }
       __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -811,7 +817,7 @@
                                 kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET,
                                 OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -821,6 +827,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -851,7 +858,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -901,7 +908,7 @@
 
     Label skip;
     __ jmp(&skip, Label::kNear);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ CompareRoot(rax, Heap::kTrueValueRootIndex);
     __ j(not_equal, &next_test);
     __ Drop(1);
@@ -929,12 +936,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -967,15 +974,13 @@
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ Push(rax);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so will always take the slow path.
   Label call_runtime;
   __ CheckEnumCache(&call_runtime);
 
@@ -989,7 +994,7 @@
   __ bind(&call_runtime);
   __ Push(rax);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
 
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
@@ -1031,7 +1036,7 @@
   __ Push(rax);  // Array
   __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ Push(rax);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ Push(Smi::FromInt(0));  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1073,7 +1078,7 @@
   __ Push(rcx);  // Enumerable.
   __ Push(rbx);  // Current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   __ j(equal, loop_statement.continue_label());
   __ movp(rbx, rax);
@@ -1085,11 +1090,11 @@
   // Perform the assignment as if via '='.
   { EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1106,7 +1111,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1237,16 +1242,11 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
       __ j(not_equal, done);
-      if (local->mode() == CONST_LEGACY) {
-        __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-      } else {  // LET || CONST
-        __ Push(var->name());
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ Push(var->name());
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ jmp(done);
   }
@@ -1270,7 +1270,7 @@
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1301,10 +1301,6 @@
           // binding in harmony mode.
           __ Push(var->name());
           __ CallRuntime(Runtime::kThrowReferenceError);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
         }
         __ bind(&done);
         context()->Plug(rax);
@@ -1382,8 +1378,9 @@
     __ Move(rdx, Smi::FromInt(flags));
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in rax.
@@ -1419,7 +1416,7 @@
             __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1448,7 +1445,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1502,7 +1499,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1575,7 +1572,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1605,7 +1602,8 @@
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1625,7 +1623,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1640,7 +1639,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1704,23 +1702,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1739,7 +1741,7 @@
       EmitBinaryOp(expr->binary_operation(), op);
     }
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1751,7 +1753,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(rax);
       break;
     case NAMED_PROPERTY:
@@ -1780,21 +1782,26 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ jmp(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, rax holds the generator object.
   __ RecordGeneratorContinuation();
-  __ Pop(rbx);
-  __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
-  __ j(not_equal, &resume);
+  __ movp(rbx, FieldOperand(rax, JSGeneratorObject::kResumeModeOffset));
+  __ movp(rax, FieldOperand(rax, JSGeneratorObject::kInputOffset));
+  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+  __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::kReturn));
+  __ j(less, &resume);
   __ Push(result_register());
+  __ j(greater, &exception);
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
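
// Hedged sketch, not part of this patch: the new continuation code performs
// a three-way dispatch with a single compare against kReturn, which is only
// sound because of the ordering the STATIC_ASSERTs above pin down
// (kNext < kReturn < kThrow). Enumerator values here are assumed.
#include <cstdio>
#include <initializer_list>

enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };

const char* Dispatch(ResumeMode mode) {
  if (mode < kReturn) return "resume the generator body";          // j(less, &resume)
  if (mode > kReturn) return "rethrow input via Runtime::kThrow";  // j(greater, &exception)
  return "wrap input as a done iterator result";                   // fall-through path
}

int main() {
  for (ResumeMode mode : {kNext, kReturn, kThrow})
    std::printf("%d -> %s\n", static_cast<int>(mode), Dispatch(mode));
  return 0;
}
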
@@ -1810,8 +1817,7 @@
   __ j(equal, &post_runtime);
   __ Push(rax);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ movp(context_register(),
-          Operand(rbp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
 
   PopOperand(result_register());
@@ -1821,102 +1827,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(
-    Expression* generator, Expression* value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in rax, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // rbx will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(rbx);
-
-  // Store input value into generator object.
-  __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOffset),
-          result_register());
-  __ movp(rcx, result_register());
-  __ RecordWriteField(rbx, JSGeneratorObject::kInputOffset, rcx, rdx,
-                      kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
-  __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
-
-  // Push receiver.
-  __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadSharedFunctionInfoSpecialField(rdx, rdx,
-      SharedFunctionInfo::kFormalParameterCountOffset);
-  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
-  Label push_argument_holes, push_frame;
-  __ bind(&push_argument_holes);
-  __ subp(rdx, Immediate(1));
-  __ j(carry, &push_frame);
-  __ Push(rcx);
-  __ jmp(&push_argument_holes);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ call(&resume_frame);
-  __ jmp(&done);
-  __ bind(&resume_frame);
-  __ pushq(rbp);  // Caller's frame pointer.
-  __ movp(rbp, rsp);
-  __ Push(rsi);  // Callee's context.
-  __ Push(rdi);  // Callee's JS Function.
-
-  // Load the operand stack size.
-  __ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
-  __ movp(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(rdx, rdx);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ cmpp(rdx, Immediate(0));
-    __ j(not_zero, &slow_resume);
-    __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    __ SmiToInteger64(rcx,
-        FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
-    __ addp(rdx, rcx);
-    __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
-            Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-    __ jmp(rdx);
-    __ bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label push_operand_holes, call_resume;
-  __ bind(&push_operand_holes);
-  __ subp(rdx, Immediate(1));
-  __ j(carry, &call_resume);
-  __ Push(rcx);
-  __ jmp(&push_operand_holes);
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  __ Push(rbx);
-  __ Push(result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ Abort(kGeneratorFailedToResume);
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperand(MemOperand operand) {
   OperandStackDepthIncrement(1);
   __ Push(operand);
@@ -1936,7 +1846,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &allocate,
+              NO_ALLOCATION_FLAGS);
   __ jmp(&done_allocate, Label::kNear);
 
   __ bind(&allocate);
@@ -2222,8 +2133,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(var->name());
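
// Hedged sketch, not part of this patch: the simplified condition above is
// equivalent to the old one provided CONST_LEGACY initialization no longer
// reaches this code (its dedicated branch is deleted just below, and the
// surviving else branch asserts op != Token::INIT for CONST_LEGACY).
#include <cassert>
#include <initializer_list>

enum VariableMode { VAR, LET, CONST };  // modes still taking this path

bool IsConstMode(VariableMode m) { return m == CONST; }

bool OldCondition(VariableMode m, bool init) {
  return !IsConstMode(m) || (m == CONST && init);
}

bool NewCondition(VariableMode m, bool init) {
  return !IsConstMode(m) || init;
}

int main() {
  for (VariableMode m : {VAR, LET, CONST})
    for (bool init : {false, true})
      assert(OldCondition(m, init) == NewCondition(m, init));
  return 0;
}
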
@@ -2245,25 +2155,6 @@
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
 
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ Push(rax);
-      __ Push(rsi);
-      __ Push(var->name());
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackLocal() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, rcx);
-      __ movp(rdx, location);
-      __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &skip);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2285,7 +2176,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(rax);
 }
 
@@ -2329,45 +2220,7 @@
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
 
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      DCHECK(!rax.is(LoadDescriptor::ReceiverRegister()));
-      __ movp(LoadDescriptor::ReceiverRegister(), rax);
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), rax);
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(rax);
 }
 
@@ -2388,7 +2241,7 @@
   if (callee->IsVariableProxy()) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the Call builtin if it
     // is a sloppy mode method.
@@ -2400,7 +2253,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     PushOperand(Operand(rsp, 0));
     __ movp(Operand(rsp, kPointerSize), rax);
@@ -2436,6 +2290,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ movp(Operand(rsp, kPointerSize), rax);
@@ -2460,7 +2315,8 @@
   __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
   __ Move(LoadDescriptor::NameRegister(), rax);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   PushOperand(Operand(rsp, 0));
@@ -2493,6 +2349,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ movp(Operand(rsp, kPointerSize), rax);
@@ -2512,7 +2369,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
@@ -2533,15 +2390,13 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, rax);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ Push(Operand(rsp, arg_count * kPointerSize));
@@ -2558,6 +2413,9 @@
   // Push the start position of the scope the call resides in.
   __ Push(Smi::FromInt(scope()->start_position()));
 
+  // Push the source position of the eval call.
+  __ Push(Smi::FromInt(expr->position()));
+
   // Do the runtime call.
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
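
// Hedged sketch, not part of this patch: EmitResolvePossiblyDirectEval now
// takes the Call expression itself so it can additionally push the source
// position of the eval() call for the runtime resolver. The Call struct and
// the integer stack below are illustrative stand-ins; the remaining pushes
// (e.g. language mode) live in unchanged lines of the original function.
#include <cstdio>
#include <vector>

struct Call {
  std::vector<int> arguments;  // stands in for ZoneList<Expression*>
  int position;                // source position of the eval() call site
};

void EmitResolvePossiblyDirectEval(std::vector<int>* stack, const Call& expr) {
  int arg_count = static_cast<int>(expr.arguments.size());
  // Push the first argument, or an "undefined" marker (-1) if there is none.
  stack->push_back(arg_count > 0 ? expr.arguments[0] : -1);
  // ...other operands elided...
  stack->push_back(expr.position);  // the newly added eval-position push
}

int main() {
  std::vector<int> stack;
  EmitResolvePossiblyDirectEval(&stack, Call{{42}, 1234});
  std::printf("pushed %zu values, top = %d\n", stack.size(), stack.back());
  return 0;
}
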
@@ -2579,7 +2437,7 @@
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperand(rax);  // Function.
     PushOperand(rdx);  // Receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2605,7 +2463,7 @@
 
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
   // to resolve the function we need to call.  Then we call the resolved
   // function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2620,12 +2478,12 @@
   // Push a copy of the function (found below the arguments) and resolve
   // eval.
   __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the callee.
   __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   SetCallPosition(expr);
   __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
@@ -2635,8 +2493,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, rax);
 }
 
@@ -2675,9 +2532,8 @@
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(rax);
 }
 
@@ -2718,10 +2574,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
+  RestoreContext();
   context()->Plug(rax);
 }
 
@@ -3113,7 +2966,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to rdi.
   int const argc = args->length() - 2;
   __ movp(rdi, Operand(rsp, (argc + 1) * kPointerSize));
@@ -3121,8 +2974,7 @@
   __ Set(rax, argc);
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, rax);
 }
@@ -3176,12 +3028,6 @@
   context()->Plug(rax);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, rax);
-  context()->Plug(rax);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3201,7 +3047,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
   __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
   __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
@@ -3241,9 +3088,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3328,7 +3173,8 @@
                         &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
           __ LoadRoot(rax, Heap::kTrueValueRootIndex);
         } else {
@@ -3336,7 +3182,8 @@
         }
         __ jmp(&done, Label::kNear);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
           __ LoadRoot(rax, Heap::kFalseValueRootIndex);
         } else {
@@ -3435,9 +3282,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3490,7 +3337,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3538,7 +3385,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(rax);
         }
         // For all contexts except kEffect: We have the result on
@@ -3550,7 +3398,8 @@
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(rax);
       }
       break;
@@ -3560,7 +3409,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3599,7 +3448,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3695,7 +3544,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3715,7 +3563,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
@@ -3723,6 +3572,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       PopOperand(rdx);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
@@ -3734,6 +3584,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cc = CompareIC::ComputeCondition(op);
       PopOperand(rdx);
 
diff --git a/src/full-codegen/x87/full-codegen-x87.cc b/src/full-codegen/x87/full-codegen-x87.cc
index f14aaf6..d7403fa 100644
--- a/src/full-codegen/x87/full-codegen-x87.cc
+++ b/src/full-codegen/x87/full-codegen-x87.cc
@@ -168,7 +168,8 @@
       __ push(edi);
       __ Push(info->scope()->GetScopeInfo(info->isolate()));
       __ CallRuntime(Runtime::kNewScriptContext);
-      PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+      PrepareForBailoutForId(BailoutId::ScriptContext(),
+                             BailoutState::TOS_REGISTER);
       // The new target value is not used, clobbering is safe.
       DCHECK_NULL(info->scope()->new_target_var());
     } else {
@@ -224,7 +225,8 @@
   // Register holding this function and new target are both trashed in case we
   // bail out here. But since that can happen only when new target is not used
   // and we allocate a context, the value of |function_in_register| is correct.
-  PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionContext(),
+                         BailoutState::NO_REGISTERS);
 
   // Possibly set up a local binding to the this function which is used in
   // derived constructors with super calls.
@@ -286,7 +288,8 @@
   }
 
   // Visit the declarations and body.
-  PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+  PrepareForBailoutForId(BailoutId::FunctionEntry(),
+                         BailoutState::NO_REGISTERS);
   {
     Comment cmnt(masm_, "[ Declarations");
     VisitDeclarations(scope()->declarations());
@@ -299,7 +302,8 @@
 
   {
     Comment cmnt(masm_, "[ Stack check");
-    PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+    PrepareForBailoutForId(BailoutId::Declarations(),
+                           BailoutState::NO_REGISTERS);
     Label ok;
     ExternalReference stack_limit =
         ExternalReference::address_of_stack_limit(isolate());
@@ -366,11 +370,11 @@
   EmitProfilingCounterReset();
 
   __ bind(&ok);
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
   // entry becomes the target of a bailout.  We don't expect it to be, but
   // we want it to work if it is.
-  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
 }
 
 void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -420,6 +424,9 @@
   }
 }
 
+void FullCodeGenerator::RestoreContext() {
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -674,7 +681,7 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-  PrepareForBailout(expr, TOS_REG);
+  PrepareForBailout(expr, BailoutState::TOS_REGISTER);
   if (should_normalize) {
     __ cmp(eax, isolate()->factory()->true_value());
     Split(equal, if_true, if_false, NULL);
@@ -705,14 +712,13 @@
   VariableProxy* proxy = declaration->proxy();
   VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  bool hole_init = mode == LET || mode == CONST;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals_->Add(variable->name(), zone());
-      globals_->Add(variable->binding_needs_init()
-                        ? isolate()->factory()->the_hole_value()
-                        : isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
 
     case VariableLocation::PARAMETER:
@@ -731,7 +737,7 @@
         __ mov(ContextOperand(esi, variable->index()),
                Immediate(isolate()->factory()->the_hole_value()));
         // No write barrier since the hole value is in old space.
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+        PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       }
       break;
 
@@ -752,6 +758,7 @@
       __ push(
           Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
       __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -790,7 +797,7 @@
       __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()),
                                 result_register(), ecx, kDontSaveFPRegs,
                                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
 
@@ -800,6 +807,7 @@
       VisitForStackValue(declaration->fun());
       PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
       CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
       break;
     }
   }
@@ -830,7 +838,7 @@
 
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
-  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -879,7 +887,7 @@
 
     Label skip;
     __ jmp(&skip, Label::kNear);
-    PrepareForBailout(clause, TOS_REG);
+    PrepareForBailout(clause, BailoutState::TOS_REGISTER);
     __ cmp(eax, isolate()->factory()->true_value());
     __ j(not_equal, &next_test);
     __ Drop(1);
@@ -907,12 +915,12 @@
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
     __ bind(clause->body_target());
-    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
     VisitStatements(clause->statements());
   }
 
   __ bind(nested_statement.break_label());
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
 }
 
 
@@ -945,15 +953,13 @@
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
   __ bind(&done_convert);
-  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
   __ push(eax);
 
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  // Note: Proxies never have an enum cache, so will always take the
-  // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so will always take the slow path.
   Label call_runtime, use_cache, fixed_array;
   __ CheckEnumCache(&call_runtime);
 
@@ -964,7 +970,7 @@
   __ bind(&call_runtime);
   __ push(eax);
   __ CallRuntime(Runtime::kForInEnumerate);
-  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+  PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
   __ j(not_equal, &fixed_array);
@@ -996,11 +1002,11 @@
   // We got a fixed array in register eax. Iterate through that.
   __ bind(&fixed_array);
 
-  __ push(Immediate(Smi::FromInt(1)));  // Smi(1) undicates slow check
+  __ push(Immediate(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
-  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1038,7 +1044,7 @@
   __ push(ecx);  // Enumerable.
   __ push(ebx);  // Current entry.
   __ CallRuntime(Runtime::kForInFilter);
-  PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+  PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
   __ cmp(eax, isolate()->factory()->undefined_value());
   __ j(equal, loop_statement.continue_label());
   __ mov(ebx, eax);
@@ -1050,11 +1056,11 @@
   // Perform the assignment as if via '='.
   { EffectContext context(this);
     EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
-    PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+    PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
   }
 
   // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
   // Generate code for the body of the loop.
   Visit(stmt->body());
 
@@ -1071,7 +1077,7 @@
   DropOperands(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1199,16 +1205,11 @@
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == LET || local->mode() == CONST ||
-        local->mode() == CONST_LEGACY) {
+    if (local->mode() == LET || local->mode() == CONST) {
       __ cmp(eax, isolate()->factory()->the_hole_value());
       __ j(not_equal, done);
-      if (local->mode() == CONST_LEGACY) {
-        __ mov(eax, isolate()->factory()->undefined_value());
-      } else {  // LET || CONST
-        __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kThrowReferenceError);
-      }
+      __ push(Immediate(var->name()));
+      __ CallRuntime(Runtime::kThrowReferenceError);
     }
     __ jmp(done);
   }
@@ -1234,7 +1235,7 @@
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   SetExpressionPosition(proxy);
-  PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+  PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
   // Three cases: global variables, lookup variables, and all other types of
@@ -1266,10 +1267,6 @@
           // binding in harmony mode.
           __ push(Immediate(var->name()));
           __ CallRuntime(Runtime::kThrowReferenceError);
-        } else {
-          // Uninitialized legacy const bindings are unholed.
-          DCHECK(var->mode() == CONST_LEGACY);
-          __ mov(eax, isolate()->factory()->undefined_value());
         }
         __ bind(&done);
         context()->Plug(eax);
@@ -1348,8 +1345,9 @@
     __ mov(edx, Immediate(Smi::FromInt(flags)));
     FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
     __ CallStub(&stub);
+    RestoreContext();
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   // If result_saved is true the result is on top of the stack.  If
   // result_saved is false the result is in eax.
@@ -1385,7 +1383,7 @@
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
             EmitLoadStoreICSlot(property->GetSlot(0));
             CallStoreIC();
-            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+            PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
             }
@@ -1413,7 +1411,7 @@
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                               NO_REGISTERS);
+                               BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
@@ -1469,7 +1467,7 @@
       DCHECK(property->emit_store());
       CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             NO_REGISTERS);
+                             BailoutState::NO_REGISTERS);
     } else {
       EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
       VisitForStackValue(value);
@@ -1542,7 +1540,7 @@
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
   }
-  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+  PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
 
   bool result_saved = false;  // Is the result saved to the stack?
   ZoneList<Expression*>* subexprs = expr->values();
@@ -1572,7 +1570,8 @@
     Handle<Code> ic =
         CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
     CallIC(ic);
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   // In case the array literal contains spread expressions it has two parts. The
@@ -1592,7 +1591,8 @@
     VisitForStackValue(subexpr);
     CallRuntimeWithOperands(Runtime::kAppendElement);
 
-    PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+    PrepareForBailoutForId(expr->GetIdForElement(array_index),
+                           BailoutState::NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1607,7 +1607,6 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
   Comment cmnt(masm_, "[ Assignment");
-  SetExpressionPosition(expr, INSERT_BREAK);
 
   Property* property = expr->target()->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
@@ -1672,23 +1671,27 @@
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy());
-          PrepareForBailout(expr->target(), TOS_REG);
+          PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
         case NAMED_SUPER_PROPERTY:
           EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_SUPER_PROPERTY:
           EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          PrepareForBailoutForId(property->LoadId(),
+                                 BailoutState::TOS_REGISTER);
           break;
       }
     }
@@ -1707,7 +1710,7 @@
     }
 
     // Deoptimization point in case the binary operation may have side effects.
-    PrepareForBailout(expr->binary_operation(), TOS_REG);
+    PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1719,7 +1722,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op(), expr->AssignmentSlot());
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       context()->Plug(eax);
       break;
     case NAMED_PROPERTY:
@@ -1748,21 +1751,26 @@
   // this.  It stays on the stack while we update the iterator.
   VisitForStackValue(expr->expression());
 
-  Label suspend, continuation, post_runtime, resume;
+  Label suspend, continuation, post_runtime, resume, exception;
 
   __ jmp(&suspend);
   __ bind(&continuation);
-  // When we arrive here, the stack top is the resume mode and
-  // result_register() holds the input value (the argument given to the
-  // respective resume operation).
+  // When we arrive here, eax holds the generator object.
   __ RecordGeneratorContinuation();
-  __ pop(ebx);
-  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
-  __ j(not_equal, &resume);
-  __ push(result_register());
+  __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
+  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOffset));
+  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
+  __ j(less, &resume);
+  __ Push(result_register());
+  __ j(greater, &exception);
   EmitCreateIteratorResult(true);
   EmitUnwindAndReturn();
 
+  __ bind(&exception);
+  __ CallRuntime(Runtime::kThrow);
+
   __ bind(&suspend);
   OperandStackDepthIncrement(1);  // Not popped on this path.
   VisitForAccumulatorValue(expr->generator_object());
@@ -1778,8 +1786,7 @@
   __ j(equal, &post_runtime);
   __ push(eax);  // generator object
   __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  __ mov(context_register(),
-         Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   __ bind(&post_runtime);
   PopOperand(result_register());
   EmitReturnSequence();
@@ -1788,101 +1795,6 @@
   context()->Plug(result_register());
 }
 
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
-    JSGeneratorObject::ResumeMode resume_mode) {
-  // The value stays in eax, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
-  // is read to throw the value when the resumed generator is already closed.
-  // ebx will hold the generator object until the activation has been resumed.
-  VisitForStackValue(generator);
-  VisitForAccumulatorValue(value);
-  PopOperand(ebx);
-
-  // Store input value into generator object.
-  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
-  __ mov(ecx, result_register());
-  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
-                      kDontSaveFPRegs);
-
-  // Load suspended function and context.
-  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
-  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
-
-  // Push receiver.
-  __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
-
-  // Push holes for arguments to generator function. Since the parser forced
-  // context allocation for any variables in generators, the actual argument
-  // values have already been copied into the context and these dummy values
-  // will never be used.
-  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(edx,
-         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ mov(ecx, isolate()->factory()->the_hole_value());
-  Label push_argument_holes, push_frame;
-  __ bind(&push_argument_holes);
-  __ sub(edx, Immediate(Smi::FromInt(1)));
-  __ j(carry, &push_frame);
-  __ push(ecx);
-  __ jmp(&push_argument_holes);
-
-  // Enter a new JavaScript frame, and initialize its slots as they were when
-  // the generator was suspended.
-  Label resume_frame, done;
-  __ bind(&push_frame);
-  __ call(&resume_frame);
-  __ jmp(&done);
-  __ bind(&resume_frame);
-  __ push(ebp);  // Caller's frame pointer.
-  __ mov(ebp, esp);
-  __ push(esi);  // Callee's context.
-  __ push(edi);  // Callee's JS Function.
-
-  // Load the operand stack size.
-  __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
-  __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
-  __ SmiUntag(edx);
-
-  // If we are sending a value and there is no operand stack, we can jump back
-  // in directly.
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    Label slow_resume;
-    __ cmp(edx, Immediate(0));
-    __ j(not_zero, &slow_resume);
-    __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(ecx);
-    __ add(edx, ecx);
-    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
-           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-    __ jmp(edx);
-    __ bind(&slow_resume);
-  }
-
-  // Otherwise, we push holes for the operand stack and call the runtime to fix
-  // up the stack and the handlers.
-  Label push_operand_holes, call_resume;
-  __ bind(&push_operand_holes);
-  __ sub(edx, Immediate(1));
-  __ j(carry, &call_resume);
-  __ push(ecx);
-  __ jmp(&push_operand_holes);
-  __ bind(&call_resume);
-  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
-  __ push(ebx);
-  __ push(result_register());
-  __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kResumeJSGeneratorObject);
-  // Not reached: the runtime call returns elsewhere.
-  __ Abort(kGeneratorFailedToResume);
-
-  __ bind(&done);
-  context()->Plug(result_register());
-}
-
 void FullCodeGenerator::PushOperand(MemOperand operand) {
   OperandStackDepthIncrement(1);
   __ Push(operand);
@@ -1902,7 +1814,8 @@
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
 
-  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate,
+              NO_ALLOCATION_FLAGS);
   __ jmp(&done_allocate, Label::kNear);
 
   __ bind(&allocate);
@@ -2224,8 +2137,7 @@
     __ bind(&uninitialized_this);
     EmitStoreToStackLocalOrContextSlot(var, location);
 
-  } else if (!var->is_const_mode() ||
-             (var->mode() == CONST && op == Token::INIT)) {
+  } else if (!var->is_const_mode() || op == Token::INIT) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
       __ Push(Immediate(var->name()));
@@ -2247,25 +2159,6 @@
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
 
-  } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
-    // Const initializers need a write barrier.
-    DCHECK(!var->IsParameter());  // No const parameters.
-    if (var->IsLookupSlot()) {
-      __ push(eax);
-      __ push(esi);
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
-    } else {
-      DCHECK(var->IsStackLocal() || var->IsContextSlot());
-      Label skip;
-      MemOperand location = VarOperand(var, ecx);
-      __ mov(edx, location);
-      __ cmp(edx, isolate()->factory()->the_hole_value());
-      __ j(not_equal, &skip, Label::kNear);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-      __ bind(&skip);
-    }
-
   } else {
     DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
     if (is_strict(language_mode())) {
@@ -2288,7 +2181,7 @@
   PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
 
@@ -2334,44 +2227,7 @@
       CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallIC(ic);
-  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
-  Comment cmnt(masm_, "[ Property");
-  SetExpressionPosition(expr);
-
-  Expression* key = expr->key();
-
-  if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), result_register());
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
-  } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      PopOperand(LoadDescriptor::ReceiverRegister());              // Object.
-      __ Move(LoadDescriptor::NameRegister(), result_register());  // Key.
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
-  }
-  PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+  PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
   context()->Plug(eax);
 }
 
@@ -2392,7 +2248,7 @@
   if (callee->IsVariableProxy()) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
-      PrepareForBailout(callee, NO_REGISTERS);
+      PrepareForBailout(callee, BailoutState::NO_REGISTERS);
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
@@ -2404,7 +2260,8 @@
     DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
-    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                           BailoutState::TOS_REGISTER);
     // Push the target function under the receiver.
     PushOperand(Operand(esp, 0));
     __ mov(Operand(esp, kPointerSize), eax);
@@ -2439,6 +2296,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2463,7 +2321,8 @@
   __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
   __ mov(LoadDescriptor::NameRegister(), eax);
   EmitKeyedPropertyLoad(callee->AsProperty());
-  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+                         BailoutState::TOS_REGISTER);
 
   // Push the target function under the receiver.
   PushOperand(Operand(esp, 0));
@@ -2495,6 +2354,7 @@
   //  - home_object
   //  - key
   CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2514,7 +2374,7 @@
     VisitForStackValue(args->at(i));
   }
 
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   SetCallPosition(expr, expr->tail_call_mode());
   if (expr->tail_call_mode() == TailCallMode::kAllow) {
     if (FLAG_trace) {
@@ -2535,15 +2395,12 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
+  RestoreContext();
   context()->DropAndPlug(1, eax);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+  int arg_count = expr->arguments()->length();
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(esp, arg_count * kPointerSize));
@@ -2560,6 +2417,9 @@
   // Push the start position of the scope the call resides in.
   __ push(Immediate(Smi::FromInt(scope()->start_position())));
 
+  // Push the source position of the eval call.
+  __ push(Immediate(Smi::FromInt(expr->position())));
+
   // Do the runtime call.
   __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
 }
@@ -2582,7 +2442,7 @@
     __ CallRuntime(Runtime::kLoadLookupSlotForCall);
     PushOperand(eax);  // Function.
     PushOperand(edx);  // Receiver.
-    PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2606,7 +2466,7 @@
 
 
 void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
   // to resolve the function we need to call.  Then we call the resolved
   // function using the given arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2622,12 +2482,12 @@
   // Push a copy of the function (found below the arguments) and
   // resolve eval.
   __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-  EmitResolvePossiblyDirectEval(arg_count);
+  EmitResolvePossiblyDirectEval(expr);
 
   // Touch up the stack with the resolved function.
   __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
 
-  PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
 
   SetCallPosition(expr);
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2637,8 +2497,7 @@
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->DropAndPlug(1, eax);
 }
 
@@ -2677,9 +2536,8 @@
   CallConstructStub stub(isolate());
   __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+  RestoreContext();
   context()->Plug(eax);
 }
 
@@ -2720,9 +2578,7 @@
   OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   context()->Plug(eax);
 }
 
@@ -3116,7 +2972,7 @@
   for (Expression* const arg : *args) {
     VisitForStackValue(arg);
   }
-  PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+  PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
   // Move target to edi.
   int const argc = args->length() - 2;
   __ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3124,8 +2980,7 @@
   __ mov(eax, Immediate(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(argc + 1);
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
   // Discard the function left on TOS.
   context()->DropAndPlug(1, eax);
 }
@@ -3179,13 +3034,6 @@
   context()->Plug(eax);
 }
 
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
-  DCHECK_EQ(0, expr->arguments()->length());
-  __ mov(eax, NativeContextOperand());
-  __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
-  context()->Plug(eax);
-}
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3204,7 +3052,8 @@
 
   Label runtime, done;
 
-  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
+  __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime,
+              NO_ALLOCATION_FLAGS);
   __ mov(ebx, NativeContextOperand());
   __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
   __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
@@ -3245,9 +3094,7 @@
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
   OperandStackDepthDecrement(arg_count + 1);
-
-  // Restore context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  RestoreContext();
 }
 
 
@@ -3332,7 +3179,8 @@
                         &materialize_true);
         if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeTrueId(),
+                               BailoutState::NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
           __ mov(eax, isolate()->factory()->true_value());
         } else {
@@ -3340,7 +3188,8 @@
         }
         __ jmp(&done, Label::kNear);
         __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        PrepareForBailoutForId(expr->MaterializeFalseId(),
+                               BailoutState::NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
           __ mov(eax, isolate()->factory()->false_value());
         } else {
@@ -3439,9 +3288,9 @@
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
   if (assign_type == VARIABLE) {
-    PrepareForBailout(expr->expression(), TOS_REG);
+    PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
   } else {
-    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+    PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
   }
 
   // Inline smi case if we are in a loop.
@@ -3496,7 +3345,7 @@
   // Convert old value into a number.
   ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
-  PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+  PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -3544,7 +3393,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN, expr->CountSlot());
-          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          PrepareForBailoutForId(expr->AssignmentId(),
+                                 BailoutState::TOS_REGISTER);
           context.Plug(eax);
         }
        // For all contexts except EffectContext we have the result on
@@ -3556,7 +3406,8 @@
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN, expr->CountSlot());
-        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        PrepareForBailoutForId(expr->AssignmentId(),
+                               BailoutState::TOS_REGISTER);
         context()->Plug(eax);
       }
       break;
@@ -3566,7 +3417,7 @@
       PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3605,7 +3456,7 @@
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
       CallIC(ic);
-      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
       if (expr->is_postfix()) {
         // Result is on the stack
         if (!context()->IsEffect()) {
@@ -3701,7 +3552,6 @@
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
-  SetExpressionPosition(expr);
 
   // First we try a fast inlined version of the compare when one of
   // the operands is a literal.
@@ -3721,7 +3571,8 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      CallRuntimeWithOperands(Runtime::kHasProperty);
+      SetExpressionPosition(expr);
+      EmitHasProperty();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
@@ -3729,6 +3580,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       PopOperand(edx);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
@@ -3740,6 +3592,7 @@
 
     default: {
       VisitForAccumulatorValue(expr->right());
+      SetExpressionPosition(expr);
       Condition cc = CompareIC::ComputeCondition(op);
       PopOperand(edx);
 
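Several hunks in this file collapse the repeated two-instruction context reload into a RestoreContext() call. The helper's definition lives outside this diff; a minimal sketch of what it presumably does on ia32, reconstructed from the inline sequence it replaces:

  void FullCodeGenerator::RestoreContext() {
    // Reload the context register (esi) from the frame slot where the
    // current context is spilled, exactly as the removed inline moves did.
    __ mov(context_register(),
           Operand(ebp, StandardFrameConstants::kContextOffset));
  }

This is not compilable outside the V8 tree; it only records the intended behavior of the new helper.
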
diff --git a/src/global-handles.cc b/src/global-handles.cc
index ed9caa9..82b4fcd 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -82,7 +82,6 @@
     index_ = static_cast<uint8_t>(index);
     DCHECK(static_cast<int>(index_) == index);
     set_state(FREE);
-    set_weakness_type(NORMAL_WEAK);
     set_in_new_space_list(false);
     parameter_or_next_free_.next_free = *first_free;
     *first_free = this;
@@ -195,16 +194,26 @@
 
   bool IsInUse() const { return state() != FREE; }
 
+  bool IsPendingPhantomCallback() const {
+    return state() == PENDING &&
+           (weakness_type() == PHANTOM_WEAK ||
+            weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
+  }
+
+  bool IsPendingPhantomResetHandle() const {
+    return state() == PENDING && weakness_type() == PHANTOM_WEAK_RESET_HANDLE;
+  }
+
   bool IsRetainer() const {
     return state() != FREE &&
-           !(state() == NEAR_DEATH && weakness_type() != NORMAL_WEAK);
+           !(state() == NEAR_DEATH && weakness_type() != FINALIZER_WEAK);
   }
 
   bool IsStrongRetainer() const { return state() == NORMAL; }
 
   bool IsWeakRetainer() const {
     return state() == WEAK || state() == PENDING ||
-           (state() == NEAR_DEATH && weakness_type() == NORMAL_WEAK);
+           (state() == NEAR_DEATH && weakness_type() == FINALIZER_WEAK);
   }
 
   void MarkPending() {
@@ -250,16 +259,6 @@
     parameter_or_next_free_.next_free = value;
   }
 
-  void MakeWeak(void* parameter, WeakCallback weak_callback) {
-    DCHECK(weak_callback != nullptr);
-    DCHECK(IsInUse());
-    CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
-    set_state(WEAK);
-    set_weakness_type(NORMAL_WEAK);
-    set_parameter(parameter);
-    weak_callback_ = weak_callback;
-  }
-
   void MakeWeak(void* parameter,
                 WeakCallbackInfo<void>::Callback phantom_callback,
                 v8::WeakCallbackType type) {
@@ -272,11 +271,23 @@
         set_weakness_type(PHANTOM_WEAK);
         break;
       case v8::WeakCallbackType::kInternalFields:
-      set_weakness_type(PHANTOM_WEAK_2_INTERNAL_FIELDS);
-      break;
+        set_weakness_type(PHANTOM_WEAK_2_INTERNAL_FIELDS);
+        break;
+      case v8::WeakCallbackType::kFinalizer:
+        set_weakness_type(FINALIZER_WEAK);
+        break;
     }
     set_parameter(parameter);
-    weak_callback_ = reinterpret_cast<WeakCallback>(phantom_callback);
+    weak_callback_ = phantom_callback;
+  }
+
+  void MakeWeak(Object*** location_addr) {
+    DCHECK(IsInUse());
+    CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
+    set_state(WEAK);
+    set_weakness_type(PHANTOM_WEAK_RESET_HANDLE);
+    set_parameter(location_addr);
+    weak_callback_ = nullptr;
   }
 
   void* ClearWeakness() {
@@ -293,6 +304,7 @@
     DCHECK(weakness_type() == PHANTOM_WEAK ||
            weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
     DCHECK(state() == PENDING);
+    DCHECK(weak_callback_ != nullptr);
 
     void* internal_fields[v8::kInternalFieldsInWeakCallback] = {nullptr,
                                                                 nullptr};
@@ -317,6 +329,15 @@
     set_state(NEAR_DEATH);
   }
 
+  void ResetPhantomHandle() {
+    DCHECK(weakness_type() == PHANTOM_WEAK_RESET_HANDLE);
+    DCHECK(state() == PENDING);
+    DCHECK(weak_callback_ == nullptr);
+    Object*** handle = reinterpret_cast<Object***>(parameter());
+    *handle = nullptr;
+    Release();
+  }
+
   bool PostGarbageCollectionProcessing(Isolate* isolate) {
     // Handles only weak handles (not phantom) that are dying.
     if (state() != Node::PENDING) return false;
@@ -332,17 +353,17 @@
            ExternalOneByteString::cast(object_)->resource() != NULL);
     DCHECK(!object_->IsExternalTwoByteString() ||
            ExternalTwoByteString::cast(object_)->resource() != NULL);
-    if (weakness_type() != NORMAL_WEAK) return false;
+    if (weakness_type() != FINALIZER_WEAK) {
+      return false;
+    }
 
     // Leaving V8.
     VMState<EXTERNAL> vmstate(isolate);
     HandleScope handle_scope(isolate);
-    Object** object = location();
-    Handle<Object> handle(*object, isolate);
-    v8::WeakCallbackData<v8::Value, void> data(
-        reinterpret_cast<v8::Isolate*>(isolate), parameter(),
-        v8::Utils::ToLocal(handle));
-    set_parameter(NULL);
+    void* internal_fields[v8::kInternalFieldsInWeakCallback] = {nullptr,
+                                                                nullptr};
+    v8::WeakCallbackInfo<void> data(reinterpret_cast<v8::Isolate*>(isolate),
+                                    parameter(), internal_fields, nullptr);
     weak_callback_(data);
 
     // Absence of explicit cleanup or revival of weak handle
@@ -384,7 +405,7 @@
   uint8_t flags_;
 
   // Handle specific callback - might be a weak reference in disguise.
-  WeakCallback weak_callback_;
+  WeakCallbackInfo<void>::Callback weak_callback_;
 
   // Provided data for callback.  In FREE state, this is used for
   // the free list link.
@@ -534,6 +555,7 @@
   }
 
   void RunInternal() override {
+    TRACE_EVENT0("v8", "V8.GCPhantomHandleProcessingCallback");
     isolate()->heap()->CallGCPrologueCallbacks(
         GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
     InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
@@ -547,7 +569,6 @@
   DISALLOW_COPY_AND_ASSIGN(PendingPhantomCallbacksSecondPassTask);
 };
 
-
 GlobalHandles::GlobalHandles(Isolate* isolate)
     : isolate_(isolate),
       number_of_global_handles_(0),
@@ -555,9 +576,9 @@
       first_used_block_(NULL),
       first_free_(NULL),
       post_gc_processing_count_(0),
+      number_of_phantom_handle_resets_(0),
       object_group_connections_(kObjectGroupConnectionsCapacity) {}
 
-
 GlobalHandles::~GlobalHandles() {
   NodeBlock* block = first_block_;
   while (block != NULL) {
@@ -599,12 +620,6 @@
 }
 
 
-void GlobalHandles::MakeWeak(Object** location, void* parameter,
-                             WeakCallback weak_callback) {
-  Node::FromLocation(location)->MakeWeak(parameter, weak_callback);
-}
-
-
 typedef v8::WeakCallbackInfo<void>::Callback GenericCallback;
 
 
@@ -614,6 +629,9 @@
   Node::FromLocation(location)->MakeWeak(parameter, phantom_callback, type);
 }
 
+void GlobalHandles::MakeWeak(Object*** location_addr) {
+  Node::FromLocation(*location_addr)->MakeWeak(location_addr);
+}
 
 void* GlobalHandles::ClearWeakness(Object** location) {
   return Node::FromLocation(location)->ClearWeakness();
@@ -649,10 +667,12 @@
     Node* node = it.node();
     if (node->IsWeakRetainer()) {
       // Pending weak phantom handles die immediately. Everything else survives.
-      if (node->state() == Node::PENDING &&
-          node->weakness_type() != NORMAL_WEAK) {
-          node->CollectPhantomCallbackData(isolate(),
-                                           &pending_phantom_callbacks_);
+      if (node->IsPendingPhantomResetHandle()) {
+        node->ResetPhantomHandle();
+        ++number_of_phantom_handle_resets_;
+      } else if (node->IsPendingPhantomCallback()) {
+        node->CollectPhantomCallbackData(isolate(),
+                                         &pending_phantom_callbacks_);
       } else {
         v->VisitPointer(node->location());
       }
@@ -710,8 +730,10 @@
     if ((node->is_independent() || node->is_partially_dependent()) &&
         node->IsWeakRetainer()) {
       // Pending weak phantom handles die immediately. Everything else survives.
-      if (node->state() == Node::PENDING &&
-          node->weakness_type() != NORMAL_WEAK) {
+      if (node->IsPendingPhantomResetHandle()) {
+        node->ResetPhantomHandle();
+        ++number_of_phantom_handle_resets_;
+      } else if (node->IsPendingPhantomCallback()) {
         node->CollectPhantomCallbackData(isolate(),
                                          &pending_phantom_callbacks_);
       } else {
@@ -753,8 +775,10 @@
     if ((node->is_independent() || !node->is_active()) &&
         node->IsWeakRetainer()) {
       // Pending weak phantom handles die immediately. Everything else survives.
-      if (node->state() == Node::PENDING &&
-          node->weakness_type() != NORMAL_WEAK) {
+      if (node->IsPendingPhantomResetHandle()) {
+        node->ResetPhantomHandle();
+        ++number_of_phantom_handle_resets_;
+      } else if (node->IsPendingPhantomCallback()) {
         node->CollectPhantomCallbackData(isolate(),
                                          &pending_phantom_callbacks_);
       } else {
@@ -888,7 +912,7 @@
 }
 
 void ObjectGroupsTracer::PrintObjectGroup(ObjectGroup* group) {
-  PrintIsolate(isolate_, "ObjectGroup (size: %lu)\n", group->length);
+  PrintIsolate(isolate_, "ObjectGroup (size: %" PRIuS ")\n", group->length);
   Object*** objects = group->objects;
 
   for (size_t i = 0; i < group->length; ++i) {
@@ -898,7 +922,7 @@
 }
 
 void ObjectGroupsTracer::PrintImplicitRefGroup(ImplicitRefGroup* group) {
-  PrintIsolate(isolate_, "ImplicitRefGroup (children count: %lu)\n",
+  PrintIsolate(isolate_, "ImplicitRefGroup (children count: %" PRIuS ")\n",
                group->length);
   PrintIsolate(isolate_, "  - Parent: ");
   PrintObject(*(group->parent));
@@ -1223,8 +1247,7 @@
   }
 
   PrintF("Global Handle Statistics:\n");
-  PrintF("  allocated memory = %" V8_SIZET_PREFIX V8_PTR_PREFIX "dB\n",
-         total * sizeof(Node));
+  PrintF("  allocated memory = %" PRIuS "B\n", total * sizeof(Node));
   PrintF("  # weak       = %d\n", weak);
   PrintF("  # pending    = %d\n", pending);
   PrintF("  # near_death = %d\n", near_death);
diff --git a/src/global-handles.h b/src/global-handles.h
index ac8487b..24a2273 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -96,19 +96,21 @@
   RetainedObjectInfo* info;
 };
 
-
 enum WeaknessType {
-  NORMAL_WEAK,  // Embedder gets a handle to the dying object.
+  // Embedder gets a handle to the dying object.
+  FINALIZER_WEAK,
   // In the following cases, the embedder gets the parameter they passed in
   // earlier, and 0 or 2 first internal fields. Note that the internal
   // fields must contain aligned non-V8 pointers.  Getting pointers to V8
   // objects through this interface would be GC unsafe so in that case the
   // embedder gets a null pointer instead.
   PHANTOM_WEAK,
-  PHANTOM_WEAK_2_INTERNAL_FIELDS
+  PHANTOM_WEAK_2_INTERNAL_FIELDS,
+  // The handle is automatically reset by the garbage collector when
+  // the object is no longer reachable.
+  PHANTOM_WEAK_RESET_HANDLE
 };
 
-
 class GlobalHandles {
  public:
   ~GlobalHandles();
@@ -122,14 +124,6 @@
   // Destroy a global handle.
   static void Destroy(Object** location);
 
-  typedef WeakCallbackData<v8::Value, void>::Callback WeakCallback;
-
-  // For a phantom weak reference, the callback does not have access to the
-  // dying object.  Phantom weak references are preferred because they allow
-  // memory to be reclaimed in one GC cycle rather than two.  However, for
-  // historical reasons the default is non-phantom.
-  enum PhantomState { Nonphantom, Phantom };
-
   // Make the global handle weak and set the callback parameter for the
   // handle.  When the garbage collector recognizes that only weak global
   // handles point to an object, the callback function is invoked (for each
@@ -140,14 +134,11 @@
   // before the callback is invoked, but the handle can still be identified
   // in the callback by using the location() of the handle.
   static void MakeWeak(Object** location, void* parameter,
-                       WeakCallback weak_callback);
-
-  // It would be nice to template this one, but it's really hard to get
-  // the template instantiator to work right if you do.
-  static void MakeWeak(Object** location, void* parameter,
                        WeakCallbackInfo<void>::Callback weak_callback,
                        v8::WeakCallbackType type);
 
+  static void MakeWeak(Object*** location_addr);
+
   void RecordStats(HeapStats* stats);
 
   // Returns the current number of weak handles.
@@ -162,6 +153,14 @@
     return number_of_global_handles_;
   }
 
+  size_t NumberOfPhantomHandleResets() {
+    return number_of_phantom_handle_resets_;
+  }
+
+  void ResetNumberOfPhantomHandleResets() {
+    number_of_phantom_handle_resets_ = 0;
+  }
+
   // Clear the weakness of a global handle.
   static void* ClearWeakness(Object** location);
 
@@ -344,6 +343,8 @@
 
   int post_gc_processing_count_;
 
+  size_t number_of_phantom_handle_resets_;
+
   // Object groups and implicit references, public and more efficient
   // representation.
   List<ObjectGroup*> object_groups_;
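
For reference, the mapping implied by Node::MakeWeak in global-handles.cc (a sketch covering only the cases visible in this diff):

  // v8::WeakCallbackType::kParameter      -> PHANTOM_WEAK
  // v8::WeakCallbackType::kInternalFields -> PHANTOM_WEAK_2_INTERNAL_FIELDS
  // v8::WeakCallbackType::kFinalizer      -> FINALIZER_WEAK (formerly NORMAL_WEAK)
  // MakeWeak(Object*** location_addr)     -> PHANTOM_WEAK_RESET_HANDLE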
diff --git a/src/globals.h b/src/globals.h
index e7ac2b9..ed297e7 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -124,11 +124,6 @@
 const int kDoubleSize    = sizeof(double);    // NOLINT
 const int kIntptrSize    = sizeof(intptr_t);  // NOLINT
 const int kPointerSize   = sizeof(void*);     // NOLINT
-#if V8_TARGET_ARCH_ARM64
-const int kFrameAlignmentInBytes = 2 * kPointerSize;
-#else
-const int kFrameAlignmentInBytes = kPointerSize;
-#endif
 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
 const int kRegisterSize  = kPointerSize + kPointerSize;
 #else
@@ -457,6 +452,33 @@
   kSimd128Unaligned
 };
 
+// Supported write barrier modes.
+enum WriteBarrierKind : uint8_t {
+  kNoWriteBarrier,
+  kMapWriteBarrier,
+  kPointerWriteBarrier,
+  kFullWriteBarrier
+};
+
+inline size_t hash_value(WriteBarrierKind kind) {
+  return static_cast<uint8_t>(kind);
+}
+
+inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
+  switch (kind) {
+    case kNoWriteBarrier:
+      return os << "NoWriteBarrier";
+    case kMapWriteBarrier:
+      return os << "MapWriteBarrier";
+    case kPointerWriteBarrier:
+      return os << "PointerWriteBarrier";
+    case kFullWriteBarrier:
+      return os << "FullWriteBarrier";
+  }
+  UNREACHABLE();
+  return os;
+}
+
 // A flag that indicates whether objects should be pretenured when
 // allocated (allocated directly into the old generation) or not
 // (allocated in the young generation if the object size and type
@@ -551,7 +573,7 @@
   // Has been executed and only one receiver type has been seen.
   MONOMORPHIC,
   // Check failed due to prototype (or map deprecation).
-  PROTOTYPE_FAILURE,
+  RECOMPUTE_HANDLER,
   // Multiple receiver types have been seen.
   POLYMORPHIC,
   // Many receiver types have been seen.
@@ -562,7 +584,6 @@
   DEBUG_STUB
 };
 
-
 enum CacheHolderFlag {
   kCacheOnPrototype,
   kCacheOnPrototypeReceiverIsDictionary,
@@ -685,7 +706,6 @@
   MIPSr6,
   // ARM64
   ALWAYS_ALIGN_CSP,
-  COHERENT_CACHE,
   // PPC
   FPR_GPR_MOV,
   LWSYNC,
@@ -800,51 +820,48 @@
 // The order of this enum has to be kept in sync with the predicates below.
 enum VariableMode {
   // User declared variables:
-  VAR,             // declared via 'var', and 'function' declarations
+  VAR,  // declared via 'var', and 'function' declarations
 
-  CONST_LEGACY,    // declared via legacy 'const' declarations
+  CONST_LEGACY,  // declared via legacy 'const' declarations
 
-  LET,             // declared via 'let' declarations (first lexical)
+  LET,  // declared via 'let' declarations (first lexical)
 
-  CONST,           // declared via 'const' declarations
-
-  IMPORT,          // declared via 'import' declarations (last lexical)
+  CONST,  // declared via 'const' declarations (last lexical)
 
   // Variables introduced by the compiler:
-  TEMPORARY,       // temporary variables (not user-visible), stack-allocated
-                   // unless the scope as a whole has forced context allocation
+  TEMPORARY,  // temporary variables (not user-visible), stack-allocated
+              // unless the scope as a whole has forced context allocation
 
-  DYNAMIC,         // always require dynamic lookup (we don't know
-                   // the declaration)
+  DYNAMIC,  // always require dynamic lookup (we don't know
+            // the declaration)
 
   DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
                    // variable is global unless it has been shadowed
                    // by an eval-introduced variable
 
-  DYNAMIC_LOCAL    // requires dynamic lookup, but we know that the
-                   // variable is local and where it is unless it
-                   // has been shadowed by an eval-introduced
-                   // variable
+  DYNAMIC_LOCAL  // requires dynamic lookup, but we know that the
+                 // variable is local and where it is unless it
+                 // has been shadowed by an eval-introduced
+                 // variable
 };
 
-
 inline bool IsDynamicVariableMode(VariableMode mode) {
   return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
 }
 
 
 inline bool IsDeclaredVariableMode(VariableMode mode) {
-  return mode >= VAR && mode <= IMPORT;
+  return mode >= VAR && mode <= CONST;
 }
 
 
 inline bool IsLexicalVariableMode(VariableMode mode) {
-  return mode >= LET && mode <= IMPORT;
+  return mode >= LET && mode <= CONST;
 }
 
 
 inline bool IsImmutableVariableMode(VariableMode mode) {
-  return mode == CONST || mode == CONST_LEGACY || mode == IMPORT;
+  return mode == CONST || mode == CONST_LEGACY;
 }
 
 
@@ -945,11 +962,14 @@
   kBaseConstructor = 1 << 5,
   kGetterFunction = 1 << 6,
   kSetterFunction = 1 << 7,
+  kAsyncFunction = 1 << 8,
   kAccessorFunction = kGetterFunction | kSetterFunction,
   kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
   kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
   kClassConstructor =
       kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
+  kAsyncArrowFunction = kArrowFunction | kAsyncFunction,
+  kAsyncConciseMethod = kAsyncFunction | kConciseMethod
 };
 
 inline bool IsValidFunctionKind(FunctionKind kind) {
@@ -964,7 +984,10 @@
          kind == FunctionKind::kDefaultBaseConstructor ||
          kind == FunctionKind::kDefaultSubclassConstructor ||
          kind == FunctionKind::kBaseConstructor ||
-         kind == FunctionKind::kSubclassConstructor;
+         kind == FunctionKind::kSubclassConstructor ||
+         kind == FunctionKind::kAsyncFunction ||
+         kind == FunctionKind::kAsyncArrowFunction ||
+         kind == FunctionKind::kAsyncConciseMethod;
 }
 
 
@@ -979,6 +1002,10 @@
   return kind & FunctionKind::kGeneratorFunction;
 }
 
+inline bool IsAsyncFunction(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kAsyncFunction;
+}
 
 inline bool IsConciseMethod(FunctionKind kind) {
   DCHECK(IsValidFunctionKind(kind));
@@ -1030,6 +1057,7 @@
   if (IsConciseMethod(kind)) return false;
   if (IsArrowFunction(kind)) return false;
   if (IsGeneratorFunction(kind)) return false;
+  if (IsAsyncFunction(kind)) return false;
   return true;
 }
 
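The async additions above are plain bit flags, so composite kinds satisfy several predicates at once. A self-contained sketch (the low-bit values for kArrowFunction and kConciseMethod are assumptions; only the values shown in this hunk are certain):

  #include <cassert>

  enum FunctionKind {
    kNormalFunction = 0,
    kArrowFunction = 1 << 0,   // assumed value
    kConciseMethod = 1 << 2,   // assumed value
    kAsyncFunction = 1 << 8,   // from the hunk above
    kAsyncArrowFunction = kArrowFunction | kAsyncFunction,
    kAsyncConciseMethod = kAsyncFunction | kConciseMethod
  };

  static bool IsAsyncFunction(FunctionKind kind) {
    return (kind & FunctionKind::kAsyncFunction) != 0;
  }

  int main() {
    // Composite kinds still answer the async predicate...
    assert(IsAsyncFunction(kAsyncArrowFunction));
    assert(IsAsyncFunction(kAsyncConciseMethod));
    // ...which is why the predicate at the end of the hunk must now also
    // exclude async kinds explicitly.
    assert(!IsAsyncFunction(kArrowFunction));
  }
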
diff --git a/src/handles.h b/src/handles.h
index 1f97d6f..ab8ed09 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -43,6 +43,10 @@
 
   V8_INLINE bool is_null() const { return location_ == nullptr; }
 
+  // Returns the raw address where this handle is stored. This should only be
+  // used for hashing handles; do not ever try to dereference it.
+  V8_INLINE Address address() const { return bit_cast<Address>(location_); }
+
  protected:
   // Provides the C++ dereference operator.
   V8_INLINE Object* operator*() const {
@@ -132,14 +136,14 @@
   // Provide function object for location equality comparison.
   struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
     V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
-      return lhs.location() == rhs.location();
+      return lhs.address() == rhs.address();
     }
   };
 
   // Provide function object for location hashing.
   struct hash : public std::unary_function<Handle<T>, size_t> {
     V8_INLINE size_t operator()(Handle<T> const& handle) const {
-      return base::hash<void*>()(handle.location());
+      return base::hash<void*>()(handle.address());
     }
   };
 
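The new address() accessor makes handle identity explicit: two handles are "equal" when they are the same slot, not when they point at the same object. A standalone illustration (HandleLike and friends are stand-in names, not V8 types):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <functional>
  #include <unordered_set>

  struct HandleLike {
    int** location_;
    std::uintptr_t address() const {
      return reinterpret_cast<std::uintptr_t>(location_);
    }
  };

  struct HandleHash {
    std::size_t operator()(const HandleLike& h) const {
      return std::hash<std::uintptr_t>()(h.address());
    }
  };

  struct HandleEqual {
    bool operator()(const HandleLike& a, const HandleLike& b) const {
      return a.address() == b.address();  // slot identity, as in the hunk
    }
  };

  int main() {
    int object = 42;
    int* slot_a = &object;  // two different slots...
    int* slot_b = &object;  // ...referring to the same object
    std::unordered_set<HandleLike, HandleHash, HandleEqual> handles;
    handles.insert(HandleLike{&slot_a});
    handles.insert(HandleLike{&slot_b});
    assert(handles.size() == 2);  // distinct slots, distinct handles
  }
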
diff --git a/src/heap-symbols.h b/src/heap-symbols.h
index f019ace..529342a 100644
--- a/src/heap-symbols.h
+++ b/src/heap-symbols.h
@@ -139,6 +139,8 @@
   V(call_site_position_symbol)              \
   V(call_site_receiver_symbol)              \
   V(call_site_strict_symbol)                \
+  V(call_site_wasm_obj_symbol)              \
+  V(call_site_wasm_func_index_symbol)       \
   V(class_end_position_symbol)              \
   V(class_start_position_symbol)            \
   V(detailed_stack_trace_symbol)            \
@@ -149,7 +151,6 @@
   V(formatted_stack_trace_symbol)           \
   V(frozen_symbol)                          \
   V(hash_code_symbol)                       \
-  V(hidden_properties_symbol)               \
   V(home_object_symbol)                     \
   V(internal_error_symbol)                  \
   V(intl_impl_object_symbol)                \
@@ -162,16 +163,15 @@
   V(nonextensible_symbol)                   \
   V(normal_ic_symbol)                       \
   V(not_mapped_symbol)                      \
-  V(observed_symbol)                        \
   V(premonomorphic_symbol)                  \
   V(promise_combined_deferred_symbol)       \
   V(promise_debug_marker_symbol)            \
   V(promise_has_handler_symbol)             \
-  V(promise_on_resolve_symbol)              \
-  V(promise_on_reject_symbol)               \
+  V(promise_fulfill_reactions_symbol)       \
+  V(promise_reject_reactions_symbol)        \
   V(promise_raw_symbol)                     \
-  V(promise_status_symbol)                  \
-  V(promise_value_symbol)                   \
+  V(promise_state_symbol)                   \
+  V(promise_result_symbol)                  \
   V(sealed_symbol)                          \
   V(stack_trace_symbol)                     \
   V(strict_function_transition_symbol)      \
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
index 972dfa6..0c411f7 100644
--- a/src/heap/gc-idle-time-handler.cc
+++ b/src/heap/gc-idle-time-handler.cc
@@ -41,8 +41,7 @@
 void GCIdleTimeHeapState::Print() {
   PrintF("contexts_disposed=%d ", contexts_disposed);
   PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
-  PrintF("size_of_objects=%" V8_SIZET_PREFIX V8_PTR_PREFIX "d ",
-         size_of_objects);
+  PrintF("size_of_objects=%" PRIuS " ", size_of_objects);
   PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
 }
 
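This file (and several below) switch size_t printing from the V8_SIZET_PREFIX/V8_PTR_PREFIX spelling to a PRIuS macro. PRIuS is V8-internal, not part of <cinttypes>; a minimal stand-in definition for illustration (the real one presumably also covers MSVC, where the length modifier differs):

  #include <cstddef>
  #include <cstdio>

  #ifndef PRIuS
  #define PRIuS "zu"  // assumed POSIX/C99 spelling of the size_t modifier
  #endif

  int main() {
    std::size_t size_of_objects = 1024;
    // String-literal concatenation splices the modifier into the format:
    std::printf("size_of_objects=%" PRIuS " \n", size_of_objects);
  }
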
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index 3c46f52..4bae0a4 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -26,10 +26,8 @@
   start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
   // TODO(cbruni): remove once we fully moved to a trace-based system.
   if (FLAG_runtime_call_stats) {
-    RuntimeCallStats* stats =
-        tracer_->heap_->isolate()->counters()->runtime_call_stats();
-    timer_.Initialize(&stats->GC, stats->current_timer());
-    stats->Enter(&timer_);
+    RuntimeCallStats::Enter(tracer_->heap_->isolate(), &timer_,
+                            &RuntimeCallStats::GC);
   }
 }
 
@@ -40,7 +38,7 @@
       tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
   // TODO(cbruni): remove once we fully moved to a trace-based system.
   if (FLAG_runtime_call_stats) {
-    tracer_->heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+    RuntimeCallStats::Leave(tracer_->heap_->isolate(), &timer_);
   }
 }
 
@@ -164,7 +162,7 @@
   current_.reduce_memory = heap_->ShouldReduceMemory();
   current_.start_time = start_time;
   current_.start_object_size = heap_->SizeOfObjects();
-  current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
+  current_.start_memory_size = heap_->memory_allocator()->Size();
   current_.start_holes_size = CountTotalHolesSize(heap_);
   current_.new_space_object_size =
       heap_->new_space()->top() - heap_->new_space()->bottom();
@@ -190,10 +188,7 @@
       start_time, used_memory);
   // TODO(cbruni): remove once we fully moved to a trace-based system.
   if (FLAG_runtime_call_stats) {
-    RuntimeCallStats* stats =
-        heap_->isolate()->counters()->runtime_call_stats();
-    timer_.Initialize(&stats->GC, stats->current_timer());
-    stats->Enter(&timer_);
+    RuntimeCallStats::Enter(heap_->isolate(), &timer_, &RuntimeCallStats::GC);
   }
 }
 
@@ -214,7 +209,7 @@
 
   current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
   current_.end_object_size = heap_->SizeOfObjects();
-  current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
+  current_.end_memory_size = heap_->memory_allocator()->Size();
   current_.end_holes_size = CountTotalHolesSize(heap_);
   current_.survived_new_space_object_size = heap_->SurvivedNewSpaceObjectSize();
 
@@ -303,7 +298,7 @@
   cumulative_incremental_marking_finalization_duration_ = 0.0;
   // TODO(cbruni): remove once we fully moved to a trace-based system.
   if (FLAG_runtime_call_stats) {
-    heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+    RuntimeCallStats::Leave(heap_->isolate(), &timer_);
   }
 }
 
@@ -411,7 +406,7 @@
 
 void GCTracer::Print() const {
   if (FLAG_trace_gc) {
-    PrintIsolate(heap_->isolate(), "");
+    PrintIsolate(heap_->isolate(), "%s", "");
   }
   Output("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
 
@@ -480,20 +475,20 @@
                    "steps_count=%d "
                    "steps_took=%.1f "
                    "scavenge_throughput=%.f "
-                   "total_size_before=%" V8_PTR_PREFIX
-                   "d "
-                   "total_size_after=%" V8_PTR_PREFIX
-                   "d "
-                   "holes_size_before=%" V8_PTR_PREFIX
-                   "d "
-                   "holes_size_after=%" V8_PTR_PREFIX
-                   "d "
-                   "allocated=%" V8_PTR_PREFIX
-                   "d "
-                   "promoted=%" V8_PTR_PREFIX
-                   "d "
-                   "semi_space_copied=%" V8_PTR_PREFIX
-                   "d "
+                   "total_size_before=%" V8PRIdPTR
+                   " "
+                   "total_size_after=%" V8PRIdPTR
+                   " "
+                   "holes_size_before=%" V8PRIdPTR
+                   " "
+                   "holes_size_after=%" V8PRIdPTR
+                   " "
+                   "allocated=%" V8PRIdPTR
+                   " "
+                   "promoted=%" V8PRIdPTR
+                   " "
+                   "semi_space_copied=%" V8PRIdPTR
+                   " "
                    "nodes_died_in_new=%d "
                    "nodes_copied_in_new=%d "
                    "nodes_promoted=%d "
@@ -555,7 +550,6 @@
           "evacuate.clean_up=%.1f "
           "evacuate.copy=%.1f "
           "evacuate.update_pointers=%.1f "
-          "evacuate.update_pointers.between_evacuated=%.1f "
           "evacuate.update_pointers.to_evacuated=%.1f "
           "evacuate.update_pointers.to_new=%.1f "
           "evacuate.update_pointers.weak=%.1f "
@@ -586,20 +580,20 @@
           "finalization_steps_took=%.1f "
           "finalization_longest_step=%.1f "
           "incremental_marking_throughput=%.f "
-          "total_size_before=%" V8_PTR_PREFIX
-          "d "
-          "total_size_after=%" V8_PTR_PREFIX
-          "d "
-          "holes_size_before=%" V8_PTR_PREFIX
-          "d "
-          "holes_size_after=%" V8_PTR_PREFIX
-          "d "
-          "allocated=%" V8_PTR_PREFIX
-          "d "
-          "promoted=%" V8_PTR_PREFIX
-          "d "
-          "semi_space_copied=%" V8_PTR_PREFIX
-          "d "
+          "total_size_before=%" V8PRIdPTR
+          " "
+          "total_size_after=%" V8PRIdPTR
+          " "
+          "holes_size_before=%" V8PRIdPTR
+          " "
+          "holes_size_after=%" V8PRIdPTR
+          " "
+          "allocated=%" V8PRIdPTR
+          " "
+          "promoted=%" V8PRIdPTR
+          " "
+          "semi_space_copied=%" V8PRIdPTR
+          " "
           "nodes_died_in_new=%d "
           "nodes_copied_in_new=%d "
           "nodes_promoted=%d "
@@ -628,7 +622,6 @@
           current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
           current_.scopes[Scope::MC_EVACUATE_COPY],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
-          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
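
The timer changes above route through static RuntimeCallStats::Enter/Leave helpers and select the counter with a pointer-to-member (&RuntimeCallStats::GC). A standalone illustration of that selection mechanism (Stats, CounterId, and Bump are illustrative names only):

  #include <cassert>

  struct Stats {
    int GC = 0;
    int Parse = 0;
  };

  // One helper serves every counter because the counter is named by a
  // pointer-to-member rather than hard-coded.
  using CounterId = int Stats::*;

  static void Bump(Stats* stats, CounterId id) { (stats->*id) += 1; }

  int main() {
    Stats stats;
    Bump(&stats, &Stats::GC);
    Bump(&stats, &Stats::GC);
    Bump(&stats, &Stats::Parse);
    assert(stats.GC == 2 && stats.Parse == 1);
  }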
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index 9ea3cce..a657f15 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_GC_TRACER_H_
 #define V8_HEAP_GC_TRACER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/platform/platform.h"
 #include "src/counters.h"
 #include "src/globals.h"
@@ -76,7 +77,6 @@
   F(MC_EVACUATE_CLEAN_UP)                          \
   F(MC_EVACUATE_COPY)                              \
   F(MC_EVACUATE_UPDATE_POINTERS)                   \
-  F(MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED) \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED)      \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW)            \
   F(MC_EVACUATE_UPDATE_POINTERS_WEAK)              \
@@ -112,7 +112,7 @@
 #define TRACE_GC(tracer, scope_id)                             \
   GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id);       \
   GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id); \
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"),                \
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),             \
                GCTracer::Scope::Name(gc_tracer_scope_id))
 
 // GCTracer collects and prints ONE line after each garbage collector
@@ -383,7 +383,7 @@
 
   // Prints a line and also adds it to the heap's ring buffer so that
   // it can be included in later crash dumps.
-  void Output(const char* format, ...) const;
+  void PRINTF_FORMAT(2, 3) Output(const char* format, ...) const;
 
   void ClearMarkCompactStatistics() {
     cumulative_incremental_marking_steps_ = 0;
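
PRINTF_FORMAT(2, 3) lets the compiler type-check callers of the variadic Output(); the indices are 2 and 3 because the implicit |this| of a member function counts as argument 1. A self-contained sketch, assuming the usual GCC/Clang attribute (src/base/compiler-specific.h, newly included above, presumably also provides a no-op fallback):

  #include <cstdarg>
  #include <cstdio>

  #if defined(__GNUC__)
  #define PRINTF_FORMAT(format_index, first_arg) \
    __attribute__((format(printf, format_index, first_arg)))
  #else
  #define PRINTF_FORMAT(format_index, first_arg)
  #endif

  // A free function uses (1, 2); a member like Output() uses (2, 3).
  static void PRINTF_FORMAT(1, 2) Log(const char* format, ...) {
    va_list args;
    va_start(args, format);
    std::vfprintf(stderr, format, args);
    va_end(args);
  }

  int main() {
    Log("%d handles\n", 42);  // OK
    // Log("%s\n", 42);       // would now warn under -Wformat
  }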
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index e31d3d6..f9c9235 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -395,7 +395,7 @@
 
 
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
-  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Page* page = Page::FromAddress(old_address);
   Address age_mark = new_space_.age_mark();
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
@@ -405,9 +405,20 @@
   if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
     return;
   }
-  Page* page = Page::FromAddress(reinterpret_cast<Address>(object));
-  Address slot = HeapObject::cast(object)->address() + offset;
-  RememberedSet<OLD_TO_NEW>::Insert(page, slot);
+  RememberedSet<OLD_TO_NEW>::Insert(
+      Page::FromAddress(reinterpret_cast<Address>(object)),
+      HeapObject::cast(object)->address() + offset);
+}
+
+void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
+  if (InNewSpace(array)) return;
+  Page* page = Page::FromAddress(reinterpret_cast<Address>(array));
+  for (int i = 0; i < length; i++) {
+    if (!InNewSpace(array->get(offset + i))) continue;
+    RememberedSet<OLD_TO_NEW>::Insert(
+        page,
+        reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
+  }
 }
 
 
@@ -457,7 +468,7 @@
   Address object_address = object->address();
   Address memento_address = object_address + object->Size();
   Address last_memento_word_address = memento_address + kPointerSize;
-  if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
+  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
     return nullptr;
   }
   HeapObject* candidate = HeapObject::FromAddress(memento_address);
@@ -485,7 +496,7 @@
       top = NewSpaceTop();
       DCHECK(memento_address == top ||
              memento_address + HeapObject::kHeaderSize <= top ||
-             !NewSpacePage::OnSamePage(memento_address, top - 1));
+             !Page::OnSamePage(memento_address, top - 1));
       if ((memento_address != top) && memento_candidate->IsValid()) {
         return memento_candidate;
       }
@@ -674,30 +685,30 @@
   return last_id;
 }
 
-
 void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
   DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
-
 void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
   DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
   set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
-
 void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
   DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
   set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
-
 void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
   DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
   set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
+void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
+  DCHECK(interpreter_entry_return_pc_offset() == Smi::FromInt(0));
+  set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
+}
 
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
     : heap_(isolate->heap()) {
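
The RecordFixedArrayElements helper added above pairs with the FIXED_ARRAY_ELEMENTS_WRITE_BARRIER macro used in heap.cc below. The macro's definition is outside this diff; judging by the loop it replaces in Heap::MoveElements, it presumably expands to roughly:

  // Sketch only -- reconstructed from the replaced code, not compilable
  // outside the V8 tree.
  #define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
    do {                                                                  \
      (heap)->RecordFixedArrayElements(array, start, length);            \
      (heap)->incremental_marking()->IterateBlackObject(array);          \
    } while (false)

i.e. one remembered-set pass over the moved range plus the incremental-marking revisit that the old per-element RecordWrite loop ended with.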
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index c3f56ac..c8f1557 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -71,7 +71,7 @@
 Heap::Heap()
     : amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
-      isolate_(NULL),
+      isolate_(nullptr),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
@@ -115,7 +115,6 @@
       inline_allocation_disabled_(false),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
-      embedder_heap_tracer_(nullptr),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_ratio_(0),
@@ -136,6 +135,7 @@
       last_gc_time_(0.0),
       scavenge_collector_(nullptr),
       mark_compact_collector_(nullptr),
+      memory_allocator_(nullptr),
       store_buffer_(this),
       incremental_marking_(nullptr),
       gc_idle_time_handler_(nullptr),
@@ -157,9 +157,6 @@
       current_gc_flags_(Heap::kNoGCFlags),
       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
       external_string_table_(this),
-      chunks_queued_for_free_(NULL),
-      concurrent_unmapping_tasks_active_(0),
-      pending_unmapping_tasks_semaphore_(0),
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       strong_roots_list_(NULL),
@@ -191,8 +188,14 @@
 intptr_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.Capacity() + old_space_->Capacity() +
-         code_space_->Capacity() + map_space_->Capacity();
+  return new_space_.Capacity() + OldGenerationCapacity();
+}
+
+intptr_t Heap::OldGenerationCapacity() {
+  if (!HasBeenSetUp()) return 0;
+
+  return old_space_->Capacity() + code_space_->Capacity() +
+         map_space_->Capacity() + lo_space_->SizeOfObjects();
 }
 
 
@@ -225,7 +228,7 @@
 intptr_t Heap::CommittedMemoryExecutable() {
   if (!HasBeenSetUp()) return 0;
 
-  return isolate()->memory_allocator()->SizeExecutable();
+  return memory_allocator()->SizeExecutable();
 }
 
 
@@ -296,7 +299,7 @@
   // and does not count available bytes already in the old space or code
   // space.  Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+  if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -336,61 +339,59 @@
 
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintIsolate(isolate_, "Memory allocator,   used: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", available: %6" V8_PTR_PREFIX "d KB\n",
-               isolate_->memory_allocator()->Size() / KB,
-               isolate_->memory_allocator()->Available() / KB);
-  PrintIsolate(isolate_, "New space,          used: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", available: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintIsolate(isolate_, "Memory allocator,   used: %6" V8PRIdPTR
+                         " KB, available: %6" V8PRIdPTR " KB\n",
+               memory_allocator()->Size() / KB,
+               memory_allocator()->Available() / KB);
+  PrintIsolate(isolate_, "New space,          used: %6" V8PRIdPTR
+                         " KB"
+                         ", available: %6" V8PRIdPTR
+                         " KB"
+                         ", committed: %6" V8PRIdPTR " KB\n",
                new_space_.Size() / KB, new_space_.Available() / KB,
                new_space_.CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Old space,          used: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", available: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintIsolate(isolate_, "Old space,          used: %6" V8PRIdPTR
+                         " KB"
+                         ", available: %6" V8PRIdPTR
+                         " KB"
+                         ", committed: %6" V8PRIdPTR " KB\n",
                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
                old_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Code space,         used: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", available: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintIsolate(isolate_, "Code space,         used: %6" V8PRIdPTR
+                         " KB"
+                         ", available: %6" V8PRIdPTR
+                         " KB"
+                         ", committed: %6" V8PRIdPTR " KB\n",
                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
                code_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Map space,          used: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", available: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintIsolate(isolate_, "Map space,          used: %6" V8PRIdPTR
+                         " KB"
+                         ", available: %6" V8PRIdPTR
+                         " KB"
+                         ", committed: %6" V8PRIdPTR " KB\n",
                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
                map_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Large object space, used: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", available: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
+                         " KB"
+                         ", available: %6" V8PRIdPTR
+                         " KB"
+                         ", committed: %6" V8PRIdPTR " KB\n",
                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
                lo_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "All spaces,         used: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", available: %6" V8_PTR_PREFIX
-                         "d KB"
-                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+  PrintIsolate(isolate_, "All spaces,         used: %6" V8PRIdPTR
+                         " KB"
+                         ", available: %6" V8PRIdPTR
+                         " KB"
+                         ", committed: %6" V8PRIdPTR " KB\n",
                this->SizeOfObjects() / KB, this->Available() / KB,
                this->CommittedMemory() / KB);
   PrintIsolate(
-      isolate_, "External memory reported: %6" V8_PTR_PREFIX "d KB\n",
+      isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
       static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
                total_gc_time_ms_);
 }
 
-
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
 void Heap::ReportStatisticsAfterGC() {
@@ -762,10 +763,10 @@
       // If GC happens while adding a stack trace to the weak fixed array,
       // which has been copied into a larger backing store, we may run into
       // a stack trace that has already been preprocessed. Guard against this.
-      if (!maybe_code->IsCode()) break;
-      Code* code = Code::cast(maybe_code);
+      if (!maybe_code->IsAbstractCode()) break;
+      AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
       int offset = Smi::cast(elements->get(j + 3))->value();
-      int pos = code->SourcePosition(offset);
+      int pos = abstract_code->SourcePosition(offset);
       elements->set(j + 2, Smi::FromInt(pos));
     }
   }
@@ -818,10 +819,10 @@
     PrintF("[IncrementalMarking] (%s).\n", gc_reason);
   }
 
-  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
   HistogramTimerScope incremental_marking_scope(
       isolate()->counters()->gc_incremental_marking_finalize());
   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
+  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
 
   {
     GCCallbacksScope scope(this);
@@ -941,7 +942,7 @@
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
   Address to_top = new_space_.top();
-  NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
+  Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
@@ -976,10 +977,6 @@
       !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
       OldGenerationAllocationLimitReached()) {
-    // Make progress in incremental marking.
-    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
-    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
-                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
     if (!incremental_marking()->IsComplete() &&
         !mark_compact_collector()->marking_deque_.IsEmpty() &&
         !FLAG_gc_global) {
@@ -1102,13 +1099,7 @@
   DCHECK(array->map() != fixed_cow_array_map());
   Object** dst_objects = array->data_start() + dst_index;
   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
-  if (!InNewSpace(array)) {
-    for (int i = 0; i < len; i++) {
-      RecordWrite(array, array->OffsetOfElementAt(dst_index + i),
-                  dst_objects[i]);
-    }
-  }
-  incremental_marking()->IterateBlackObject(array);
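+  // The macro below bundles the write-barrier work previously done by hand:
+  // recording slots for the moved range when the array is outside new space
+  // and keeping incremental marking's view of the array consistent.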
+  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
 }
 
 
@@ -1229,7 +1220,7 @@
     if (!cache->IsUndefined()) {
       NormalizedMapCache::cast(cache)->Clear();
     }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+    context = Context::cast(context)->next_context_link();
   }
 }
 
@@ -1475,10 +1466,6 @@
   CompletelyClearInstanceofCache();
 
   FlushNumberStringCache();
-  if (FLAG_cleanup_code_caches_at_gc) {
-    polymorphic_code_cache()->set_cache(undefined_value());
-  }
-
   ClearNormalizedMapCaches();
 }
 
@@ -1545,15 +1532,12 @@
   if (object->IsSmi()) return false;
   HeapObject* heap_object = HeapObject::cast(object);
   if (!object->IsJSObject()) return false;
-  Object* obj_constructor = (JSObject::cast(object))->map()->GetConstructor();
-  if (!obj_constructor->IsJSFunction()) return false;
-  JSFunction* constructor = JSFunction::cast(obj_constructor);
-  if (!constructor->shared()->IsApiFunction()) return false;
-  if (constructor != nullptr &&
-      constructor->initial_map() == heap_object->map()) {
-    return true;
-  }
-  return false;
+  JSObject* js_object = JSObject::cast(object);
+  if (!js_object->WasConstructedFromApiFunction()) return false;
+  JSFunction* constructor =
+      JSFunction::cast(js_object->map()->GetConstructor());
+
+  return constructor->initial_map() == heap_object->map();
 }
 
 
@@ -1565,7 +1549,8 @@
   front_ = rear_ =
       reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
   limit_ = reinterpret_cast<struct Entry*>(
-      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
+      Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
+          ->area_start());
   emergency_stack_ = NULL;
 }
 
@@ -1573,7 +1558,7 @@
 void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);
 
-  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   struct Entry* head_start = rear_;
   struct Entry* head_end =
       Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
@@ -1778,12 +1763,6 @@
 
 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     ExternalStringTableUpdaterCallback updater_func) {
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    external_string_table_.Verify();
-  }
-#endif
-
   if (external_string_table_.new_space_strings_.is_empty()) return;
 
   Object** start = &external_string_table_.new_space_strings_[0];
@@ -1791,7 +1770,6 @@
   Object** last = start;
 
   for (Object** p = start; p < end; ++p) {
-    DCHECK(InFromSpace(*p));
     String* target = updater_func(this, p);
 
     if (target == NULL) continue;
@@ -1929,13 +1907,14 @@
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
     while (new_space_front != new_space_.top()) {
-      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+      if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         new_space_front +=
             StaticScavengeVisitor::IterateBody(object->map(), object);
       } else {
-        new_space_front =
-            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+        new_space_front = Page::FromAllocationAreaAddress(new_space_front)
+                              ->next_page()
+                              ->area_start();
       }
     }
 
@@ -2174,6 +2153,21 @@
 #undef STRUCT_TABLE_ELEMENT
 };
 
+namespace {
+
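+// Completes a map created with ALLOCATE_PARTIAL_MAP once the objects its
+// remaining fields need (the empty fixed array, the empty descriptor array,
+// and the null value) have been allocated.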
+void FinalizePartialMap(Heap* heap, Map* map) {
+  map->set_code_cache(heap->empty_fixed_array());
+  map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
+  map->set_raw_transitions(Smi::FromInt(0));
+  map->set_instance_descriptors(heap->empty_descriptor_array());
+  if (FLAG_unbox_double_fields) {
+    map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+  }
+  map->set_prototype(heap->null_value());
+  map->set_constructor_or_backpointer(heap->null_value());
+}
+
+}  // namespace
 
 bool Heap::CreateInitialMaps() {
   HeapObject* obj = nullptr;
@@ -2195,8 +2189,10 @@
   }
 
     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+    fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
+    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
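+    // With the hole map available this early, the_hole_value can be created
+    // right after undefined instead of using a placeholder Smi.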
 
 #undef ALLOCATE_PARTIAL_MAP
   }
@@ -2222,6 +2218,12 @@
   set_undefined_value(Oddball::cast(obj));
   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
   DCHECK(!InNewSpace(undefined_value()));
+  {
+    AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
+    if (!allocation.To(&obj)) return false;
+  }
+  set_the_hole_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kTheHole);
 
   // Set preliminary exception sentinel value before actually initializing it.
   set_exception(null_value());
@@ -2234,55 +2236,13 @@
   set_empty_descriptor_array(DescriptorArray::cast(obj));
 
   // Fix the instance_descriptors for the existing maps.
-  meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  meta_map()->set_raw_transitions(Smi::FromInt(0));
-  meta_map()->set_instance_descriptors(empty_descriptor_array());
-  if (FLAG_unbox_double_fields) {
-    meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
-  }
-
-  fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->set_dependent_code(
-      DependentCode::cast(empty_fixed_array()));
-  fixed_array_map()->set_raw_transitions(Smi::FromInt(0));
-  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
-  if (FLAG_unbox_double_fields) {
-    fixed_array_map()->set_layout_descriptor(
-        LayoutDescriptor::FastPointerLayout());
-  }
-
-  undefined_map()->set_code_cache(empty_fixed_array());
-  undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  undefined_map()->set_raw_transitions(Smi::FromInt(0));
-  undefined_map()->set_instance_descriptors(empty_descriptor_array());
-  if (FLAG_unbox_double_fields) {
-    undefined_map()->set_layout_descriptor(
-        LayoutDescriptor::FastPointerLayout());
-  }
-
-  null_map()->set_code_cache(empty_fixed_array());
-  null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  null_map()->set_raw_transitions(Smi::FromInt(0));
-  null_map()->set_instance_descriptors(empty_descriptor_array());
-  if (FLAG_unbox_double_fields) {
-    null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
-  }
-  null_map()->set_is_undetectable();
-
-  // Fix prototype object for existing maps.
-  meta_map()->set_prototype(null_value());
-  meta_map()->set_constructor_or_backpointer(null_value());
-
-  fixed_array_map()->set_prototype(null_value());
-  fixed_array_map()->set_constructor_or_backpointer(null_value());
-
-  undefined_map()->set_prototype(null_value());
-  undefined_map()->set_constructor_or_backpointer(null_value());
+  FinalizePartialMap(this, meta_map());
+  FinalizePartialMap(this, fixed_array_map());
+  FinalizePartialMap(this, undefined_map());
   undefined_map()->set_is_undetectable();
-
-  null_map()->set_prototype(null_value());
-  null_map()->set_constructor_or_backpointer(null_value());
+  FinalizePartialMap(this, null_map());
+  null_map()->set_is_undetectable();
+  FinalizePartialMap(this, the_hole_map());
 
   {  // Map allocation
 #define ALLOCATE_MAP(instance_type, size, field_name)               \
@@ -2304,7 +2264,8 @@
   }
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
-    DCHECK(fixed_array_map() != fixed_cow_array_map());
+    fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
+    DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
     ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
@@ -2320,7 +2281,6 @@
 #undef ALLOCATE_SIMD128_MAP
     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
 
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
     ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
                            Context::BOOLEAN_FUNCTION_INDEX);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
@@ -2329,6 +2289,7 @@
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
 
     for (unsigned i = 0; i < arraysize(string_type_table); i++) {
       const StringTypeTable& entry = string_type_table[i];
@@ -2354,6 +2315,7 @@
     }
 
     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+    fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
     ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
@@ -2644,10 +2606,6 @@
   set_minus_infinity_value(
       *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
 
-  // The hole has not been created yet, but we want to put something
-  // predictable in the gaps in the string table, so lets make that Smi zero.
-  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
-
   // Allocate initial string table.
   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
 
@@ -2663,6 +2621,11 @@
                       handle(Smi::FromInt(0), isolate()), false, "object",
                       Oddball::kNull);
 
+  // Initialize the_hole_value.
+  Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
+                      handle(Smi::FromInt(-1), isolate()), false, "undefined",
+                      Oddball::kTheHole);
+
   // Initialize the true_value.
   Oddball::Initialize(isolate(), factory->true_value(), "true",
                       handle(Smi::FromInt(1), isolate()), true, "boolean",
@@ -2673,10 +2636,6 @@
                       handle(Smi::FromInt(0), isolate()), false, "boolean",
                       Oddball::kFalse);
 
-  set_the_hole_value(*factory->NewOddball(
-      factory->the_hole_map(), "hole", handle(Smi::FromInt(-1), isolate()),
-      false, "undefined", Oddball::kTheHole));
-
   set_uninitialized_value(
       *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
                            handle(Smi::FromInt(-1), isolate()), false,
@@ -2706,6 +2665,11 @@
                            handle(Smi::FromInt(-6), isolate()), false,
                            "undefined", Oddball::kOptimizedOut));
 
+  set_stale_register(
+      *factory->NewOddball(factory->stale_register_map(), "stale_register",
+                           handle(Smi::FromInt(-7), isolate()), false,
+                           "undefined", Oddball::kStaleRegister));
+
   for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
     Handle<String> str =
         factory->InternalizeUtf8String(constant_string_table[i].contents);
@@ -2716,13 +2680,6 @@
   // expanding the dictionary during bootstrapping.
   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
 
-  // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
-  // is set to avoid expanding the dictionary during bootstrapping.
-  set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
-
-  set_polymorphic_code_cache(PolymorphicCodeCache::cast(
-      *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
-
   set_instanceof_cache_function(Smi::FromInt(0));
   set_instanceof_cache_map(Smi::FromInt(0));
   set_instanceof_cache_answer(Smi::FromInt(0));
@@ -2740,14 +2697,6 @@
 #undef SYMBOL_INIT
   }
 
-  // The {hidden_properties_symbol} is special because it is the only name with
-  // hash code zero. This ensures that it will always be the first entry as
-  // sorted by hash code in descriptor arrays. It is used to identify the hidden
-  // properties in JSObjects.
-  // kIsNotArrayIndexMask is a computed hash with value zero.
-  Symbol::cast(roots_[khidden_properties_symbolRootIndex])
-      ->set_hash_field(Name::kIsNotArrayIndexMask);
-
   {
     HandleScope scope(isolate());
 #define SYMBOL_INIT(name, description)                                      \
@@ -2768,8 +2717,6 @@
 #undef SYMBOL_INIT
   }
 
-  CreateFixedStubs();
-
   // Allocate the dictionary of intrinsic function names.
   Handle<NameDictionary> intrinsic_names =
       NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
@@ -2812,10 +2759,6 @@
   // The symbol registry is initialized lazily.
   set_symbol_registry(Smi::FromInt(0));
 
-  // Allocate object to hold object observation state.
-  set_observation_state(*factory->NewJSObjectFromMap(
-      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
-
   // Microtask queue uses the empty fixed array as a sentinel for "empty".
   // Number of queued microtasks stored in Isolate::pending_microtask_count().
   set_microtask_queue(empty_fixed_array());
@@ -2896,8 +2839,16 @@
   cell->set_value(the_hole_value());
   set_empty_property_cell(*cell);
 
-  Handle<PropertyCell> species_cell = factory->NewPropertyCell();
-  species_cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  cell = factory->NewPropertyCell();
+  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  set_has_instance_protector(*cell);
+
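+  // Like the array protector, these cells start out "valid" and are
+  // invalidated once the invariant they guard can no longer be assumed.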
+  Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
+      handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+  set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
+
+  Handle<Cell> species_cell = factory->NewCell(
+      handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
   set_species_protector(*species_cell);
 
   set_weak_stack_trace_list(Smi::FromInt(0));
@@ -2915,6 +2866,8 @@
 
   // Initialize compilation cache.
   isolate_->compilation_cache()->Clear();
+
+  CreateFixedStubs();
 }
 
 
@@ -2925,8 +2878,6 @@
     case kInstanceofCacheMapRootIndex:
     case kInstanceofCacheAnswerRootIndex:
     case kCodeStubsRootIndex:
-    case kNonMonomorphicCacheRootIndex:
-    case kPolymorphicCodeCacheRootIndex:
     case kEmptyScriptRootIndex:
     case kSymbolRegistryRootIndex:
     case kScriptListRootIndex:
@@ -3166,10 +3117,6 @@
   DCHECK(!lo_space()->Contains(object));
   DCHECK(object->map() != fixed_cow_array_map());
 
-  // Ensure that the no handle-scope has more than one pointer to the same
-  // backing-store.
-  SLOW_DCHECK(CountHandlesForObject(object) <= 1);
-
   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
@@ -3185,7 +3132,6 @@
   // we still do it.
   CreateFillerObjectAt(object->address(), bytes_to_trim,
                        ClearRecordedSlots::kYes);
-
   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept, creating a filler
   // object does not require synchronization.
@@ -3197,6 +3143,11 @@
   FixedArrayBase* new_object =
       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
 
+  // Remove recorded slots for the new map and length offset.
+  ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
+  ClearRecordedSlot(new_object, HeapObject::RawField(
+                                    new_object, FixedArrayBase::kLengthOffset));
+
   // Maintain consistency of live bytes during incremental marking
   Marking::TransferMark(this, object->address(), new_start);
   AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
@@ -3372,8 +3323,9 @@
   result->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
   DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
-  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()) ||
+  DCHECK(memory_allocator()->code_range() == NULL ||
+         !memory_allocator()->code_range()->valid() ||
+         memory_allocator()->code_range()->contains(code->address()) ||
          object_size <= code_space()->AreaSize());
   code->set_gc_metadata(Smi::FromInt(0));
   code->set_ic_age(global_ic_age_);
@@ -3398,8 +3350,9 @@
 
   // Relocate the copy.
   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()) ||
+  DCHECK(memory_allocator()->code_range() == NULL ||
+         !memory_allocator()->code_range()->valid() ||
+         memory_allocator()->code_range()->contains(code->address()) ||
          obj_size <= code_space()->AreaSize());
   new_code->Relocate(new_addr - old_addr);
   // We have to iterate over the object and process its pointers when black
@@ -3467,8 +3420,9 @@
 
   // Relocate the copy.
   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()) ||
+  DCHECK(memory_allocator()->code_range() == NULL ||
+         !memory_allocator()->code_range()->valid() ||
+         memory_allocator()->code_range()->contains(code->address()) ||
          new_obj_size <= code_space()->AreaSize());
 
   new_code->Relocate(new_addr - old_addr);
@@ -3610,6 +3564,7 @@
   CHECK(map->instance_type() == JS_REGEXP_TYPE ||
         map->instance_type() == JS_OBJECT_TYPE ||
         map->instance_type() == JS_ARRAY_TYPE ||
+        map->instance_type() == JS_API_OBJECT_TYPE ||
         map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
 
   int object_size = map->instance_size();
@@ -4018,8 +3973,7 @@
 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
                                                    PretenureFlag pretenure) {
   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
-    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length",
-                                                kDoubleAligned);
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
   }
   int size = FixedDoubleArray::SizeFor(length);
   AllocationSpace space = SelectSpace(pretenure);
@@ -4520,7 +4474,7 @@
   USE(title);
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
          gc_count_);
-  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+  PrintF("old_generation_allocation_limit_ %" V8PRIdPTR "\n",
          old_generation_allocation_limit_);
 
   PrintF("\n");
@@ -4529,7 +4483,7 @@
   PrintF("\n");
 
   PrintF("Heap statistics : ");
-  isolate_->memory_allocator()->ReportStatistics();
+  memory_allocator()->ReportStatistics();
   PrintF("To space : ");
   new_space_.ReportStatistics();
   PrintF("Old space : ");
@@ -4546,7 +4500,7 @@
 #endif  // DEBUG
 
 bool Heap::Contains(HeapObject* value) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+  if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
     return false;
   }
   return HasBeenSetUp() &&
@@ -4556,7 +4510,7 @@
 }
 
 bool Heap::ContainsSlow(Address addr) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+  if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
     return false;
   }
   return HasBeenSetUp() &&
@@ -4566,7 +4520,7 @@
 }
 
 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+  if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
     return false;
   }
   if (!HasBeenSetUp()) return false;
@@ -4588,7 +4542,7 @@
 }
 
 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+  if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
     return false;
   }
   if (!HasBeenSetUp()) return false;
@@ -4681,7 +4635,7 @@
   NewSpacePageIterator it(new_space_.FromSpaceStart(),
                           new_space_.FromSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4805,6 +4759,49 @@
   v->Synchronize(VisitorSynchronization::kSmiRootList);
 }
 
+// We cannot avoid stale handles to left-trimmed objects, but we can make sure
+// that all handles still needed are updated. Stale pointers are filtered out
+// by clearing the slot, which also enables post-processing of handles (needed
+// because the sweeper might actually free the underlying page).
+class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
+ public:
+  explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
+    USE(heap_);
+  }
+
+  void VisitPointer(Object** p) override { FixHandle(p); }
+
+  void VisitPointers(Object** start, Object** end) override {
+    for (Object** p = start; p < end; p++) FixHandle(p);
+  }
+
+ private:
+  inline void FixHandle(Object** p) {
+    HeapObject* current = reinterpret_cast<HeapObject*>(*p);
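+    // A Smi in the slot can never be a stale left-trimmed pointer.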
+    if (!current->IsHeapObject()) return;
+    const MapWord map_word = current->map_word();
+    if (!map_word.IsForwardingAddress() && current->IsFiller()) {
+#ifdef DEBUG
+      // We need to find a FixedArrayBase map after walking the fillers.
+      while (current->IsFiller()) {
+        Address next = reinterpret_cast<Address>(current);
+        if (current->map() == heap_->one_pointer_filler_map()) {
+          next += kPointerSize;
+        } else if (current->map() == heap_->two_pointer_filler_map()) {
+          next += 2 * kPointerSize;
+        } else {
+          next += current->Size();
+        }
+        current = reinterpret_cast<HeapObject*>(next);
+      }
+      DCHECK(current->IsFixedArrayBase());
+#endif  // DEBUG
+      *p = nullptr;
+    }
+  }
+
+  Heap* heap_;
+};
 
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
@@ -4825,6 +4822,8 @@
   v->Synchronize(VisitorSynchronization::kCompilationCache);
 
   // Iterate over local handles in handle scopes.
+  FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
+  isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
   isolate_->handle_scope_implementer()->Iterate(v);
   isolate_->IterateDeferredHandles(v);
   v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -5029,12 +5028,11 @@
   *stats->map_space_capacity = map_space_->Capacity();
   *stats->lo_space_size = lo_space_->Size();
   isolate_->global_handles()->RecordStats(stats);
-  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
+  *stats->memory_allocator_size = memory_allocator()->Size();
   *stats->memory_allocator_capacity =
-      isolate()->memory_allocator()->Size() +
-      isolate()->memory_allocator()->Available();
+      memory_allocator()->Size() + memory_allocator()->Available();
   *stats->os_error = base::OS::GetLastError();
-  isolate()->memory_allocator()->Available();
+  memory_allocator()->Available();
   if (take_snapshot) {
     HeapIterator iterator(this);
     for (HeapObject* obj = iterator.next(); obj != NULL;
@@ -5188,8 +5186,8 @@
       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
 
   if (FLAG_trace_gc_verbose) {
-    PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
-                           "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+    PrintIsolate(isolate_, "Grow: old size: %" V8PRIdPTR
+                           " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
                  old_gen_size / KB, old_generation_allocation_limit_ / KB,
                  factor);
   }
@@ -5203,10 +5201,10 @@
   intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
   if (limit < old_generation_allocation_limit_) {
     if (FLAG_trace_gc_verbose) {
-      PrintIsolate(isolate_, "Dampen: old size: %" V8_PTR_PREFIX
-                             "d KB, old limit: %" V8_PTR_PREFIX
-                             "d KB, "
-                             "new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+      PrintIsolate(isolate_,
+                   "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+                   " KB, "
+                   "new limit: %" V8PRIdPTR " KB (%.1f)\n",
                    old_gen_size / KB, old_generation_allocation_limit_ / KB,
                    limit / KB, factor);
     }
@@ -5269,7 +5267,9 @@
   base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
 
   // Set up memory allocator.
-  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
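+  // The memory allocator is now created and owned by the heap; it also sets
+  // up the code range, which was previously initialized separately below.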
+  memory_allocator_ = new MemoryAllocator(isolate_);
+  if (!memory_allocator_->SetUp(MaxReserved(), MaxExecutableSize(),
+                                code_range_size_))
     return false;
 
   // Initialize incremental marking.
@@ -5286,8 +5286,6 @@
   if (old_space_ == NULL) return false;
   if (!old_space_->SetUp()) return false;
 
-  if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
-
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
   code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
@@ -5407,12 +5405,20 @@
 #endif  // DEBUG
 }
 
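+// The embedder heap tracer is now owned by the mark-compact collector; these
+// Heap methods simply forward to it.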
+void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+  mark_compact_collector()->SetEmbedderHeapTracer(tracer);
+}
+
+bool Heap::UsingEmbedderHeapTracer() {
+  return mark_compact_collector()->UsingEmbedderHeapTracer();
+}
+
+void Heap::TracePossibleWrapper(JSObject* js_object) {
+  mark_compact_collector()->TracePossibleWrapper(js_object);
+}
+
 void Heap::RegisterExternallyReferencedObject(Object** object) {
-  DCHECK(mark_compact_collector()->in_use());
-  HeapObject* heap_object = HeapObject::cast(*object);
-  DCHECK(Contains(heap_object));
-  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-  mark_compact_collector()->MarkObject(heap_object, mark_bit);
+  mark_compact_collector()->RegisterExternallyReferencedObject(object);
 }
 
 void Heap::TearDown() {
@@ -5431,7 +5437,7 @@
     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
-    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
+    PrintF("max_alive_after_gc=%" V8PRIdPTR " ", get_max_alive_after_gc());
     PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
     PrintF("total_sweeping_time=%.1f ",
            tracer()->cumulative_sweeping_duration());
@@ -5440,17 +5446,17 @@
 
   if (FLAG_print_max_heap_committed) {
     PrintF("\n");
-    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
+    PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
            MaximumCommittedMemory());
-    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
+    PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
            new_space_.MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_space=%" V8_PTR_PREFIX "d ",
+    PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
            old_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
+    PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
            code_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
+    PrintF("maximum_committed_by_map_space=%" V8PRIdPTR " ",
            map_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
+    PrintF("maximum_committed_by_lo_space=%" V8PRIdPTR " ",
            lo_space_->MaximumCommittedMemory());
     PrintF("\n\n");
   }
@@ -5490,8 +5496,6 @@
   delete scavenge_job_;
   scavenge_job_ = nullptr;
 
-  WaitUntilUnmappingOfFreeChunksCompleted();
-
   delete array_buffer_tracker_;
   array_buffer_tracker_ = nullptr;
 
@@ -5527,7 +5531,7 @@
 
   store_buffer()->TearDown();
 
-  isolate_->memory_allocator()->TearDown();
+  memory_allocator()->TearDown();
 
   StrongRootsList* next = NULL;
   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
@@ -5535,6 +5539,9 @@
     delete list;
   }
   strong_roots_list_ = NULL;
+
+  delete memory_allocator_;
+  memory_allocator_ = nullptr;
 }
 
 
@@ -5579,12 +5586,6 @@
   UNREACHABLE();
 }
 
-void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
-  DCHECK_NOT_NULL(tracer);
-  CHECK_NULL(embedder_heap_tracer_);
-  embedder_heap_tracer_ = tracer;
-}
-
 // TODO(ishell): Find a better place for this.
 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                          Handle<DependentCode> dep) {
@@ -5648,9 +5649,8 @@
   if (new_length != length) retained_maps->SetLength(new_length);
 }
 
-
-void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
-  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
+void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
+  v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
 }
 
 #ifdef DEBUG
@@ -5673,32 +5673,6 @@
 
 #endif
 
-#ifdef ENABLE_SLOW_DCHECKS
-
-class CountHandleVisitor : public ObjectVisitor {
- public:
-  explicit CountHandleVisitor(Object* object) : object_(object) {}
-
-  void VisitPointers(Object** start, Object** end) override {
-    for (Object** p = start; p < end; p++) {
-      if (object_ == reinterpret_cast<Object*>(*p)) count_++;
-    }
-  }
-
-  int count() { return count_; }
-
- private:
-  Object* object_;
-  int count_ = 0;
-};
-
-int Heap::CountHandlesForObject(Object* object) {
-  CountHandleVisitor v(object);
-  isolate_->handle_scope_implementer()->Iterate(&v);
-  return v.count();
-}
-#endif
-
 class CheckHandleCountVisitor : public ObjectVisitor {
  public:
   CheckHandleCountVisitor() : handle_count_(0) {}
@@ -6255,7 +6229,6 @@
   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
 }
 
-
 void Heap::ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
@@ -6290,7 +6263,6 @@
 #endif
 }
 
-
 void Heap::ExternalStringTable::TearDown() {
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
@@ -6303,75 +6275,6 @@
 }
 
 
-class Heap::UnmapFreeMemoryTask : public v8::Task {
- public:
-  UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
-      : heap_(heap), head_(head) {}
-  virtual ~UnmapFreeMemoryTask() {}
-
- private:
-  // v8::Task overrides.
-  void Run() override {
-    heap_->FreeQueuedChunks(head_);
-    heap_->pending_unmapping_tasks_semaphore_.Signal();
-  }
-
-  Heap* heap_;
-  MemoryChunk* head_;
-
-  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
-};
-
-
-void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
-  while (concurrent_unmapping_tasks_active_ > 0) {
-    pending_unmapping_tasks_semaphore_.Wait();
-    concurrent_unmapping_tasks_active_--;
-  }
-}
-
-
-void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
-  // PreFree logically frees the memory chunk. However, the actual freeing
-  // will happen on a separate thread sometime later.
-  isolate_->memory_allocator()->PreFreeMemory(chunk);
-
-  // The chunks added to this queue will be freed by a concurrent thread.
-  chunk->set_next_chunk(chunks_queued_for_free_);
-  chunks_queued_for_free_ = chunk;
-}
-
-
-void Heap::FreeQueuedChunks() {
-  if (chunks_queued_for_free_ != NULL) {
-    if (FLAG_concurrent_sweeping) {
-      V8::GetCurrentPlatform()->CallOnBackgroundThread(
-          new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
-          v8::Platform::kShortRunningTask);
-    } else {
-      FreeQueuedChunks(chunks_queued_for_free_);
-      pending_unmapping_tasks_semaphore_.Signal();
-    }
-    chunks_queued_for_free_ = NULL;
-  } else {
-    // If we do not have anything to unmap, we just signal the semaphore
-    // that we are done.
-    pending_unmapping_tasks_semaphore_.Signal();
-  }
-  concurrent_unmapping_tasks_active_++;
-}
-
-
-void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
-  MemoryChunk* next;
-  MemoryChunk* chunk;
-  for (chunk = list_head; chunk != NULL; chunk = next) {
-    next = chunk->next_chunk();
-    isolate_->memory_allocator()->PerformFreeMemory(chunk);
-  }
-}
-
-
 void Heap::RememberUnmappedPage(Address page, bool compacted) {
   uintptr_t p = reinterpret_cast<uintptr_t>(page);
   // Tag the page pointer to make it findable in the dump file.
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 9457453..8fdb64a 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -13,7 +13,7 @@
 #include "include/v8.h"
 #include "src/allocation.h"
 #include "src/assert-scope.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/globals.h"
 #include "src/heap-symbols.h"
 // TODO(mstarzinger): Two more includes to kill!
@@ -78,6 +78,7 @@
   V(Oddball, exception, Exception)                                             \
   V(Oddball, termination_exception, TerminationException)                      \
   V(Oddball, optimized_out, OptimizedOut)                                      \
+  V(Oddball, stale_register, StaleRegister)                                    \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
@@ -152,6 +153,7 @@
   V(Map, exception_map, ExceptionMap)                                          \
   V(Map, termination_exception_map, TerminationExceptionMap)                   \
   V(Map, optimized_out_map, OptimizedOutMap)                                   \
+  V(Map, stale_register_map, StaleRegisterMap)                                 \
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
   V(Map, neander_map, NeanderMap)                                              \
@@ -162,8 +164,6 @@
   V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
   V(JSObject, message_listeners, MessageListeners)                             \
   V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
-  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
-  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)        \
   V(Code, js_entry_code, JsEntryCode)                                          \
   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
   V(FixedArray, natives_source_cache, NativesSourceCache)                      \
@@ -176,7 +176,6 @@
   V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
   V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
   V(Cell, undefined_cell, UndefinedCell)                                       \
-  V(JSObject, observation_state, ObservationState)                             \
   V(Object, symbol_registry, SymbolRegistry)                                   \
   V(Object, script_list, ScriptList)                                           \
   V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
@@ -189,12 +188,14 @@
   V(ArrayList, retained_maps, RetainedMaps)                                    \
   V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
   V(PropertyCell, array_protector, ArrayProtector)                             \
+  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
   V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
   V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
   V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
   V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
   V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
-  V(PropertyCell, species_protector, SpeciesProtector)
+  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
+  V(Cell, species_protector, SpeciesProtector)
 
 // Entries in this list are limited to Smis and are not visited during GC.
 #define SMI_ROOT_LIST(V)                                                   \
@@ -204,8 +205,8 @@
   V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
   V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)       \
   V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)             \
-  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
-
+  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)             \
+  V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
 
 #define ROOT_LIST(V)  \
   STRONG_ROOT_LIST(V) \
@@ -325,7 +326,7 @@
   }
 
   Page* GetHeadPage() {
-    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   }
 
   void SetNewLimit(Address limit) {
@@ -333,7 +334,7 @@
     if (emergency_stack_) return;
 
     // If the limit is not on the same page, we can ignore it.
-    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
 
     limit_ = reinterpret_cast<struct Entry*>(limit);
 
@@ -568,7 +569,7 @@
   static inline bool IsOneByte(T t, int chars);
 
   static void FatalProcessOutOfMemory(const char* location,
-                                      bool take_snapshot = false);
+                                      bool is_heap_oom = false);
 
   static bool RootIsImmortalImmovable(int root_index);
 
@@ -626,11 +627,9 @@
     return old_space_->allocation_limit_address();
   }
 
-  // TODO(hpayer): There is still a missmatch between capacity and actual
-  // committed memory size.
-  bool CanExpandOldGeneration(int size = 0) {
+  bool CanExpandOldGeneration(int size) {
     if (force_oom_) return false;
-    return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
+    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
   }
 
   // Clear the Instanceof cache (used when a prototype changes).
@@ -730,6 +729,14 @@
   // Returns false if not able to reserve.
   bool ReserveSpace(Reservation* reservations);
 
+  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+  bool UsingEmbedderHeapTracer();
+
+  void TracePossibleWrapper(JSObject* js_object);
+
+  void RegisterExternallyReferencedObject(Object** object);
+
   //
   // Support for the API.
   //
@@ -775,11 +782,6 @@
 
   inline bool OldGenerationAllocationLimitReached();
 
-  void QueueMemoryChunkForFree(MemoryChunk* chunk);
-  void FreeQueuedChunks(MemoryChunk* list_head);
-  void FreeQueuedChunks();
-  void WaitUntilUnmappingOfFreeChunksCompleted();
-
   // Completely clear the Instanceof cache (to stop it keeping objects alive
   // around a GC).
   inline void CompletelyClearInstanceofCache();
@@ -792,6 +794,7 @@
   inline void SetConstructStubDeoptPCOffset(int pc_offset);
   inline void SetGetterStubDeoptPCOffset(int pc_offset);
   inline void SetSetterStubDeoptPCOffset(int pc_offset);
+  inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
 
   // For post mortem debugging.
   void RememberUnmappedPage(Address page, bool compacted);
@@ -916,20 +919,12 @@
   const char* GetSpaceName(int idx);
 
   // ===========================================================================
-  // API. ======================================================================
-  // ===========================================================================
-
-  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
-  void RegisterExternallyReferencedObject(Object** object);
-
-  // ===========================================================================
   // Getters to other components. ==============================================
   // ===========================================================================
 
   GCTracer* tracer() { return tracer_; }
 
-  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+  MemoryAllocator* memory_allocator() { return memory_allocator_; }
 
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
@@ -979,11 +974,6 @@
     roots_[kCodeStubsRootIndex] = value;
   }
 
-  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
-  void SetRootNonMonomorphicCache(UnseededNumberDictionary* value) {
-    roots_[kNonMonomorphicCacheRootIndex] = value;
-  }
-
   void SetRootMaterializedObjects(FixedArray* objects) {
     roots_[kMaterializedObjectsRootIndex] = objects;
   }
@@ -1089,6 +1079,8 @@
 
   // Write barrier support for object[offset] = o;
   inline void RecordWrite(Object* object, int offset, Object* o);
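+  // Write barrier support for a consecutive range of elements,
+  // array[offset] .. array[offset + length - 1].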
+  inline void RecordFixedArrayElements(FixedArray* array, int offset,
+                                       int length);
 
   Address* store_buffer_top_address() { return store_buffer()->top_address(); }
 
@@ -1192,6 +1184,9 @@
   // more spaces are needed until it reaches the limit.
   intptr_t Capacity();
 
+  // Returns the capacity of the old generation.
+  intptr_t OldGenerationCapacity();
+
   // Returns the amount of memory currently committed for the heap.
   intptr_t CommittedMemory();
 
@@ -1394,13 +1389,9 @@
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
 #endif
-#ifdef ENABLE_SLOW_DCHECKS
-  int CountHandlesForObject(Object* object);
-#endif
 
  private:
   class PretenuringScope;
-  class UnmapFreeMemoryTask;
 
   // External strings table is a place where all external strings are
   // registered.  We need to keep track of such strings to properly
@@ -2012,11 +2003,11 @@
 
   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).
-  AtomicNumber<size_t> always_allocate_scope_count_;
+  base::AtomicNumber<size_t> always_allocate_scope_count_;
 
   // Stores the memory pressure level that set by MemoryPressureNotification
   // and reset by a mark-compact garbage collection.
-  AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
 
   // For keeping track of context disposals.
   int contexts_disposed_;
@@ -2104,7 +2095,6 @@
   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
 
   GCTracer* tracer_;
-  EmbedderHeapTracer* embedder_heap_tracer_;
 
   int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
@@ -2151,6 +2141,8 @@
 
   MarkCompactCollector* mark_compact_collector_;
 
+  MemoryAllocator* memory_allocator_;
+
   StoreBuffer store_buffer_;
 
   IncrementalMarking* incremental_marking_;
@@ -2217,12 +2209,6 @@
 
   ExternalStringTable external_string_table_;
 
-  MemoryChunk* chunks_queued_for_free_;
-
-  size_t concurrent_unmapping_tasks_active_;
-
-  base::Semaphore pending_unmapping_tasks_semaphore_;
-
   base::Mutex relocation_mutex_;
 
   int gc_callbacks_depth_;
@@ -2254,6 +2240,7 @@
   friend class Page;
   friend class Scavenger;
   friend class StoreBuffer;
+  friend class TestMemoryAllocatorScope;
 
   // The allocator interface.
   friend class Factory;
diff --git a/src/heap/incremental-marking-job.cc b/src/heap/incremental-marking-job.cc
index 3ccbec2..fe14dd0 100644
--- a/src/heap/incremental-marking-job.cc
+++ b/src/heap/incremental-marking-job.cc
@@ -71,14 +71,12 @@
 IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
     Heap* heap, double deadline_in_ms) {
   IncrementalMarking* incremental_marking = heap->incremental_marking();
-  MarkCompactCollector* mark_compact_collector = heap->mark_compact_collector();
   if (incremental_marking->IsStopped()) {
     return kDone;
   }
-  if (mark_compact_collector->sweeping_in_progress()) {
-    if (mark_compact_collector->IsSweepingCompleted()) {
-      mark_compact_collector->EnsureSweepingCompleted();
-    }
+  if (incremental_marking->IsSweeping()) {
+    incremental_marking->FinalizeSweeping();
+    // TODO(hpayer): We can continue here if enough idle time is left.
     return kMoreWork;
   }
   const double remaining_idle_time_in_ms =
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 376e848..c250b90 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -279,7 +279,7 @@
   if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
     Page* page = Page::FromAddress(object->address());
     if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
-      // IterateBlackObject requires us to visit the hole object.
+      // IterateBlackObject requires us to visit the whole object.
       page->ResetProgressBar();
     }
     IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
@@ -353,7 +353,7 @@
     NewSpace* space) {
   NewSpacePageIterator it(space);
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, false);
   }
 }
@@ -366,7 +366,7 @@
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
 
   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, false, false);
     lop = lop->next_page();
   }
@@ -385,7 +385,7 @@
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, true);
   }
 }
@@ -398,7 +398,7 @@
   ActivateIncrementalWriteBarrier(heap_->new_space());
 
   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, true, is_compacting_);
     lop = lop->next_page();
   }
@@ -558,12 +558,6 @@
   heap_->CompletelyClearInstanceofCache();
   heap_->isolate()->compilation_cache()->MarkCompactPrologue();
 
-  if (FLAG_cleanup_code_caches_at_gc) {
-    // We will mark cache black with a separate pass
-    // when we finish marking.
-    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
-  }
-
   // Mark strong roots grey.
   IncrementalMarkingRootMarkingVisitor visitor(this);
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
@@ -605,6 +599,7 @@
 
 
 void IncrementalMarking::MarkObjectGroups() {
+  DCHECK(!heap_->UsingEmbedderHeapTracer());
   DCHECK(!finalize_marking_completed_);
   DCHECK(IsMarking());
 
@@ -735,7 +730,9 @@
   // 4) Remove weak cell with live values from the list of weak cells, they
   // do not need processing during GC.
   MarkRoots();
-  MarkObjectGroups();
+  if (!heap_->UsingEmbedderHeapTracer()) {
+    MarkObjectGroups();
+  }
   if (incremental_marking_finalization_rounds_ == 0) {
     // Map retaining is needed for performance, not correctness,
     // so we can do it only once at the beginning of the finalization.
@@ -932,13 +929,6 @@
     }
   }
 
-  if (FLAG_cleanup_code_caches_at_gc) {
-    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
-    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
-    MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
-                                          PolymorphicCodeCache::kSize);
-  }
-
   Object* context = heap_->native_contexts_list();
   while (!context->IsUndefined()) {
     // GC can happen when the context is not fully initialized,
@@ -952,7 +942,7 @@
         MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
       }
     }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+    context = Context::cast(context)->next_context_link();
   }
 }
 
@@ -1130,6 +1120,18 @@
   }
 }
 
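+// Finishes concurrent sweeping if it is complete (or if concurrent sweeping
+// is disabled) and, once no sweeping is in progress, starts marking.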
+void IncrementalMarking::FinalizeSweeping() {
+  DCHECK(state_ == SWEEPING);
+  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+      (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
+       !FLAG_concurrent_sweeping)) {
+    heap_->mark_compact_collector()->EnsureSweepingCompleted();
+  }
+  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+    bytes_scanned_ = 0;
+    StartMarking();
+  }
+}
 
 intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                   CompletionAction action,
@@ -1179,17 +1181,11 @@
 
     bytes_scanned_ += bytes_to_process;
 
+    // TODO(hpayer): Do not account for sweeping finalization while marking.
     if (state_ == SWEEPING) {
-      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
-          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
-           !FLAG_concurrent_sweeping)) {
-        heap_->mark_compact_collector()->EnsureSweepingCompleted();
-      }
-      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
-        bytes_scanned_ = 0;
-        StartMarking();
-      }
+      FinalizeSweeping();
     }
+
     if (state_ == MARKING) {
       bytes_processed = ProcessMarkingDeque(bytes_to_process);
       if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index f10150d..9c5a3b5 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -68,6 +68,8 @@
 
   inline bool IsStopped() { return state() == STOPPED; }
 
+  inline bool IsSweeping() { return state() == SWEEPING; }
+
   INLINE(bool IsMarking()) { return state() >= MARKING; }
 
   inline bool IsMarkingIncomplete() { return state() == MARKING; }
@@ -135,6 +137,8 @@
   // incremental marking to be postponed.
   static const size_t kMaxIdleMarkingDelayCounter = 3;
 
+  void FinalizeSweeping();
+
   void OldSpaceStep(intptr_t allocated);
 
   intptr_t Step(intptr_t allocated, CompletionAction action,
@@ -181,7 +185,7 @@
     SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
   }
 
-  inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
+  inline void SetNewSpacePageFlags(Page* chunk) {
     SetNewSpacePageFlags(chunk, IsMarking());
   }
 
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index 281ece4..455f443 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -12,17 +12,6 @@
 namespace v8 {
 namespace internal {
 
-inline std::vector<Page*>& MarkCompactCollector::sweeping_list(Space* space) {
-  if (space == heap()->old_space()) {
-    return sweeping_list_old_space_;
-  } else if (space == heap()->code_space()) {
-    return sweeping_list_code_space_;
-  }
-  DCHECK_EQ(space, heap()->map_space());
-  return sweeping_list_map_space_;
-}
-
-
 void MarkCompactCollector::PushBlack(HeapObject* obj) {
   DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
   if (marking_deque_.Push(obj)) {
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index e537689..b2ae93d 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -49,21 +49,22 @@
 
 MarkCompactCollector::MarkCompactCollector(Heap* heap)
     :  // NOLINT
+      heap_(heap),
+      page_parallel_job_semaphore_(0),
 #ifdef DEBUG
       state_(IDLE),
 #endif
       marking_parity_(ODD_MARKING_PARITY),
       was_marked_incrementally_(false),
       evacuation_(false),
-      heap_(heap),
+      compacting_(false),
+      black_allocation_(false),
+      have_code_to_deoptimize_(false),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(0),
       code_flusher_(nullptr),
-      have_code_to_deoptimize_(false),
-      compacting_(false),
-      sweeping_in_progress_(false),
-      pending_sweeper_tasks_semaphore_(0),
-      pending_compaction_tasks_semaphore_(0) {
+      embedder_heap_tracer_(nullptr),
+      sweeper_(heap) {
 }
 
 #ifdef VERIFY_HEAP
@@ -134,10 +135,9 @@
   NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
-  CHECK_EQ(space->bottom(),
-           NewSpacePage::FromAddress(space->bottom())->area_start());
+  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address limit = it.has_next() ? page->area_end() : end;
     CHECK(limit == end || !page->Contains(end));
     VerifyMarking(space->heap(), page->area_start(), limit);
@@ -209,7 +209,7 @@
   VerifyEvacuationVisitor visitor;
 
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address current = page->area_start();
     Address limit = it.has_next() ? page->area_end() : space->top();
     CHECK(limit == space->top() || !page->Contains(space->top()));
@@ -375,7 +375,7 @@
   NewSpacePageIterator it(space->bottom(), space->top());
 
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -459,11 +459,13 @@
   }
 }
 
-
-class MarkCompactCollector::SweeperTask : public v8::Task {
+class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
  public:
-  SweeperTask(Heap* heap, AllocationSpace space_to_start)
-      : heap_(heap), space_to_start_(space_to_start) {}
+  SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
+              AllocationSpace space_to_start)
+      : sweeper_(sweeper),
+        pending_sweeper_tasks_(pending_sweeper_tasks),
+        space_to_start_(space_to_start) {}
 
   virtual ~SweeperTask() {}
 
@@ -478,33 +480,45 @@
       const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
       DCHECK_GE(space_id, FIRST_PAGED_SPACE);
       DCHECK_LE(space_id, LAST_PAGED_SPACE);
-      heap_->mark_compact_collector()->SweepInParallel(
-          heap_->paged_space(space_id), 0);
+      sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
     }
-    heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
+    pending_sweeper_tasks_->Signal();
   }
 
-  Heap* heap_;
+  Sweeper* sweeper_;
+  base::Semaphore* pending_sweeper_tasks_;
   AllocationSpace space_to_start_;
 
   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
 };
 
-
-void MarkCompactCollector::StartSweeperThreads() {
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
+void MarkCompactCollector::Sweeper::StartSweeping() {
+  sweeping_in_progress_ = true;
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
+              [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
+  });
+  if (FLAG_concurrent_sweeping) {
+    ForAllSweepingSpaces([this](AllocationSpace space) {
+      if (space == NEW_SPACE) return;
+      StartSweepingHelper(space);
+    });
+  }
 }
 
+void MarkCompactCollector::Sweeper::StartSweepingHelper(
+    AllocationSpace space_to_start) {
+  num_sweeping_tasks_++;
+  V8::GetCurrentPlatform()->CallOnBackgroundThread(
+      new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
+      v8::Platform::kShortRunningTask);
+}
 
-void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
+void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
+    Page* page) {
   PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
   if (!page->SweepingDone()) {
-    SweepInParallel(page, owner);
+    ParallelSweepPage(page, owner);
     if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
@@ -514,34 +528,49 @@
   }
 }
 
-
 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
-  if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
-    SweepInParallel(heap()->paged_space(space->identity()), 0);
+  if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
+    sweeper().ParallelSweepSpace(space->identity(), 0);
     space->RefillFreeList();
   }
 }
 
+Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  SweptList& list = swept_list_[space->identity()];
+  if (list.length() > 0) {
+    return list.RemoveLast();
+  }
+  return nullptr;
+}
 
-void MarkCompactCollector::EnsureSweepingCompleted() {
-  DCHECK(sweeping_in_progress_ == true);
+void MarkCompactCollector::Sweeper::EnsureCompleted() {
+  if (!sweeping_in_progress_) return;
 
   // If sweeping is not completed or not running at all, we try to complete it
   // here.
   if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
-    SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
-    SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
-    SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
+    ForAllSweepingSpaces(
+        [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
   }
 
   if (FLAG_concurrent_sweeping) {
-    pending_sweeper_tasks_semaphore_.Wait();
-    pending_sweeper_tasks_semaphore_.Wait();
-    pending_sweeper_tasks_semaphore_.Wait();
+    while (num_sweeping_tasks_ > 0) {
+      pending_sweeper_tasks_semaphore_.Wait();
+      num_sweeping_tasks_--;
+    }
   }
 
-  ParallelSweepSpacesComplete();
+  ForAllSweepingSpaces(
+      [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); });
+  late_pages_ = false;
   sweeping_in_progress_ = false;
+}
+
+void MarkCompactCollector::EnsureSweepingCompleted() {
+  if (!sweeper().sweeping_in_progress()) return;
+
+  sweeper().EnsureCompleted();
   heap()->old_space()->RefillFreeList();
   heap()->code_space()->RefillFreeList();
   heap()->map_space()->RefillFreeList();
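
The loop in EnsureCompleted above replaces three hard-coded Wait() calls with
one wait per started task, which stays correct when Finish() later spawns an
extra task for late pages. A minimal sketch of the counting pattern, assuming
every task signals the semaphore exactly once:

    // Sketch: join all background sweeper tasks. Each SweeperTask signals
    // the semaphore exactly once when it is done.
    void JoinSweeperTasks(base::Semaphore* pending_sweeper_tasks,
                          int* num_sweeping_tasks) {
      while (*num_sweeping_tasks > 0) {
        pending_sweeper_tasks->Wait();  // blocks until one task finishes
        (*num_sweeping_tasks)--;
      }
    }
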
@@ -553,8 +582,7 @@
 #endif
 }
 
-
-bool MarkCompactCollector::IsSweepingCompleted() {
+bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
   if (!pending_sweeper_tasks_semaphore_.WaitFor(
           base::TimeDelta::FromSeconds(0))) {
     return false;
@@ -563,7 +591,6 @@
   return true;
 }
 
-
 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
   // This is only used when resizing an object.
   DCHECK(MemoryChunk::FromAddress(old_start) ==
@@ -759,8 +786,8 @@
       if (FLAG_trace_fragmentation_verbose) {
         PrintIsolate(isolate(),
                      "compaction-selection-page: space=%s free_bytes_page=%d "
-                     "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
-                     "sum_compaction_kb=%d "
+                     "fragmentation_limit_kb=%" V8PRIdPTR
+                     " fragmentation_limit_percent=%d sum_compaction_kb=%d "
                      "compaction_limit_kb=%d\n",
                      AllocationSpaceName(space->identity()), free_bytes / KB,
                      free_bytes_threshold / KB, target_fragmentation_percent,
@@ -821,7 +848,7 @@
 
   // If concurrent unmapping tasks are still running, we should wait for
   // them here.
-  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
 
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -857,6 +884,12 @@
 void MarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
 
+  if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
+    // If we added some more pages during MC, we need to start at least one
+    // more task, as all the other tasks might already have finished.
+    sweeper().StartSweepingHelper(OLD_SPACE);
+  }
+
   // The hashing of weak_object_to_code_table is no longer valid.
   heap()->weak_object_to_code_table()->Rehash(
       heap()->isolate()->factory()->undefined_value());
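
The late-page check at the top of Finish() guards against a lost wakeup: a
page queued while sweeping is already running is only pushed onto a list, and
every existing task may already have drained its lists and signaled. A sketch
of the race this closes; the names are from this patch, the flow is
illustrative:

    // Sketch: a late page is merely queued; no running task is obliged to
    // come back for it.
    sweeper().AddLatePage(OLD_SPACE, aborted_page);  // queued under mutex_
    if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
      sweeper().StartSweepingHelper(OLD_SPACE);  // guarantees a consumer
    }
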
@@ -1373,8 +1406,8 @@
   void MarkObjectByPointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;
 
-    // Replace flat cons strings in place.
     HeapObject* object = HeapObject::cast(*p);
+
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     if (Marking::IsBlackOrGrey(mark_bit)) return;
 
@@ -1633,7 +1666,7 @@
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback) {}
 
-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite<Heap::kCached>(object,
                                                local_pretenuring_feedback_);
     int size = object->Size();
@@ -1727,8 +1760,8 @@
         compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
                                                         alignment);
     if (allocation.IsRetry()) {
-      FatalProcessOutOfMemory(
-          "MarkCompactCollector: semi-space copy, fallback in old gen\n");
+      v8::internal::Heap::FatalProcessOutOfMemory(
+          "MarkCompactCollector: semi-space copy, fallback in old gen", true);
     }
     return allocation;
   }
@@ -1765,6 +1798,34 @@
   HashMap* local_pretenuring_feedback_;
 };
 
+class MarkCompactCollector::EvacuateNewSpacePageVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+
+  static void TryMoveToOldSpace(Page* page, PagedSpace* owner) {
+    if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) {
+      Page* new_page = Page::ConvertNewToOld(page, owner);
+      new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+    }
+  }
+
+  inline bool Visit(HeapObject* object) {
+    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
+      object->GetHeap()->array_buffer_tracker()->Promote(
+          JSArrayBuffer::cast(object));
+    }
+    RecordMigratedSlotVisitor visitor;
+    object->IterateBodyFast(&visitor);
+    promoted_size_ += object->Size();
+    return true;
+  }
+
+  intptr_t promoted_size() { return promoted_size_; }
+
+ private:
+  intptr_t promoted_size_;
+};
 
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
@@ -1773,7 +1834,7 @@
                           CompactionSpaceCollection* compaction_spaces)
       : EvacuateVisitorBase(heap, compaction_spaces) {}
 
-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
@@ -1785,6 +1846,28 @@
   }
 };
 
+class MarkCompactCollector::EvacuateRecordOnlyVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateRecordOnlyVisitor(AllocationSpace space) : space_(space) {}
+
+  inline bool Visit(HeapObject* object) {
+    if (space_ == OLD_SPACE) {
+      RecordMigratedSlotVisitor visitor;
+      object->IterateBody(&visitor);
+    } else {
+      DCHECK_EQ(space_, CODE_SPACE);
+      // Add a typed slot for the whole code object.
+      RememberedSet<OLD_TO_OLD>::InsertTyped(
+          Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT,
+          object->address());
+    }
+    return true;
+  }
+
+ private:
+  AllocationSpace space_;
+};
 
 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
   PageIterator it(space);
@@ -1802,7 +1885,7 @@
   NewSpace* space = heap()->new_space();
   NewSpacePageIterator it(space->bottom(), space->top());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -1961,14 +2044,17 @@
   }
 }
 
-
 // Mark all objects reachable (transitively) from objects on the marking
 // stack including references only considered in the atomic marking pause.
 void MarkCompactCollector::ProcessEphemeralMarking(
     ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
-  bool work_to_do = true;
   DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
+  bool work_to_do = true;
   while (work_to_do) {
+    if (UsingEmbedderHeapTracer()) {
+      embedder_heap_tracer()->TraceWrappersFrom(wrappers_to_trace_);
+      wrappers_to_trace_.clear();
+    }
     if (!only_process_harmony_weak_collections) {
       isolate()->global_handles()->IterateObjectGroups(
           visitor, &IsUnmarkedHeapObjectWithHeap);
@@ -1980,7 +2066,6 @@
   }
 }
 
-
 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
        !it.done(); it.Advance()) {
@@ -2079,6 +2164,32 @@
   in_use_ = false;
 }
 
+void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+  DCHECK_NOT_NULL(tracer);
+  CHECK_NULL(embedder_heap_tracer_);
+  embedder_heap_tracer_ = tracer;
+}
+
+void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
+  DCHECK(js_object->WasConstructedFromApiFunction());
+  if (js_object->GetInternalFieldCount() >= 2 &&
+      js_object->GetInternalField(0) &&
+      js_object->GetInternalField(0) != heap_->undefined_value() &&
+      js_object->GetInternalField(1) != heap_->undefined_value()) {
+    DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
+    wrappers_to_trace().push_back(std::pair<void*, void*>(
+        reinterpret_cast<void*>(js_object->GetInternalField(0)),
+        reinterpret_cast<void*>(js_object->GetInternalField(1))));
+  }
+}
+
+void MarkCompactCollector::RegisterExternallyReferencedObject(Object** object) {
+  DCHECK(in_use());
+  HeapObject* heap_object = HeapObject::cast(*object);
+  DCHECK(heap_->Contains(heap_object));
+  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+  MarkObject(heap_object, mark_bit);
+}
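
TracePossibleWrapper encodes an assumption about the embedder's wrapper
layout: internal field 0 holds an aligned type tag (hence the % 2 == 0 check)
and internal field 1 the wrapped native object. A hedged sketch of the
embedder side that produces this shape; WrapperTypeInfo and NativeObject are
made-up names:

    // Sketch: wrap a native object in the two-field layout the collector
    // inspects above. Illustrative only.
    void WrapNative(v8::Local<v8::Object> js_obj, WrapperTypeInfo* type_info,
                    NativeObject* native) {
      js_obj->SetAlignedPointerInInternalField(0, type_info);  // aligned tag
      js_obj->SetAlignedPointerInInternalField(1, native);     // payload
    }
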
 
 void MarkCompactCollector::MarkLiveObjects() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
@@ -2135,8 +2246,11 @@
     {
       TRACE_GC(heap()->tracer(),
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
+      if (UsingEmbedderHeapTracer()) {
+        embedder_heap_tracer()->TracePrologue();
+        ProcessMarkingDeque();
+      }
       ProcessEphemeralMarking(&root_visitor, false);
-      ProcessMarkingDeque();
     }
 
     // The objects reachable from the roots, weak maps or object groups
@@ -2170,7 +2284,9 @@
     {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
       ProcessEphemeralMarking(&root_visitor, true);
-      ProcessMarkingDeque();
+      if (UsingEmbedderHeapTracer()) {
+        embedder_heap_tracer()->TraceEpilogue();
+      }
     }
   }
 
@@ -2930,9 +3046,16 @@
   newspace_evacuation_candidates_.Rewind(0);
 }
 
-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
+  // NewSpacePages with more live bytes than this threshold qualify for fast
+  // evacuation.
+  static int PageEvacuationThreshold() {
+    if (FLAG_page_promotion)
+      return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+    return Page::kAllocatableMemory + kPointerSize;
+  }
+
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
@@ -2940,11 +3063,12 @@
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
+        new_space_page_visitor(),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}
 
-  inline bool EvacuatePage(MemoryChunk* chunk);
+  inline bool EvacuatePage(Page* chunk);
 
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -2953,16 +3077,32 @@
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
  private:
+  enum EvacuationMode {
+    kObjectsNewToOld,
+    kPageNewToOld,
+    kObjectsOldToOld,
+  };
+
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
-  Heap* heap() { return collector_->heap(); }
+  inline Heap* heap() { return collector_->heap(); }
+
+  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+    // Note: The order of checks is important in this function.
+    if (chunk->InNewSpace()) return kObjectsNewToOld;
+    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
+      return kPageNewToOld;
+    DCHECK(chunk->IsEvacuationCandidate());
+    return kObjectsOldToOld;
+  }
 
   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }
 
-  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
+  template <IterationMode mode, class Visitor>
+  inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
 
   MarkCompactCollector* collector_;
 
@@ -2972,6 +3112,7 @@
 
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
+  EvacuateNewSpacePageVisitor new_space_page_visitor;
   EvacuateOldSpaceVisitor old_space_visitor_;
 
   // Bookkeeping info.
@@ -2979,22 +3120,32 @@
   intptr_t bytes_compacted_;
 };
 
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
-    MemoryChunk* p, HeapObjectVisitor* visitor) {
+template <MarkCompactCollector::IterationMode mode, class Visitor>
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
+                                                         Visitor* visitor) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
+         p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
   int saved_live_bytes = p->LiveBytes();
   double evacuation_time;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
   }
   if (FLAG_trace_evacuation) {
+    const char age_mark_tag =
+        !p->InNewSpace()
+            ? 'x'
+            : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
+                  ? '>'
+                  : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
+                                                                       : '#';
     PrintIsolate(heap()->isolate(),
-                 "evacuation[%p]: page=%p new_space=%d executable=%d "
-                 "live_bytes=%d time=%f\n",
-                 this, p, p->InNewSpace(),
+                 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
+                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
+                 this, p, p->InNewSpace(), age_mark_tag,
+                 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
                  p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
                  evacuation_time);
   }
@@ -3004,20 +3155,38 @@
   return success;
 }
 
-bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
-  bool success = false;
-  if (chunk->InNewSpace()) {
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-              NewSpacePage::kSweepingDone);
-    success = EvacuateSinglePage(chunk, &new_space_visitor_);
-    DCHECK(success);
-    USE(success);
-  } else {
-    DCHECK(chunk->IsEvacuationCandidate());
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    success = EvacuateSinglePage(chunk, &old_space_visitor_);
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
+  bool result = false;
+  DCHECK(page->SweepingDone());
+  switch (ComputeEvacuationMode(page)) {
+    case kObjectsNewToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kPageNewToOld:
+      result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kObjectsOldToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
+      if (!result) {
+        // Aborted compaction page. We can record slots here to have them
+        // processed in parallel later on.
+        EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
+        result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
+        DCHECK(result);
+        USE(result);
+        // We need to return failure here to indicate that we want this page
+        // added to the sweeper.
+        return false;
+      }
+      break;
+    default:
+      UNREACHABLE();
   }
-  return success;
+  return result;
 }
 
 void MarkCompactCollector::Evacuator::Finalize() {
@@ -3025,12 +3194,14 @@
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
-  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
+                                       new_space_page_visitor.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
       new_space_visitor_.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
-      new_space_visitor_.semispace_copied_size());
+      new_space_visitor_.semispace_copied_size() +
+      new_space_page_visitor.promoted_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
 
@@ -3073,13 +3244,21 @@
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
-    return evacuator->EvacuatePage(chunk);
+    return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
   }
 
-  static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
-                                       PerPageData data) {
+  static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
+                                       bool success, PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
+    } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+      DCHECK(success);
+      Page* p = static_cast<Page*>(chunk);
+      p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+      p->ForAllFreeListCategories(
+          [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+      heap->mark_compact_collector()->sweeper().AddLatePage(
+          p->owner()->identity(), p);
     } else {
       Page* p = static_cast<Page*>(chunk);
       if (success) {
@@ -3089,17 +3268,10 @@
       } else {
         // We have partially compacted the page, i.e., some objects may have
         // moved, others are still in place.
-        // We need to:
-        // - Leave the evacuation candidate flag for later processing of slots
-        //   buffer entries.
-        // - Leave the slots buffer there for processing of entries added by
-        //   the write barrier.
-        // - Rescan the page as slot recording in the migration buffer only
-        //   happens upon moving (which we potentially didn't do).
-        // - Leave the page in the list of pages of a space since we could not
-        //   fully evacuate it.
-        DCHECK(p->IsEvacuationCandidate());
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        p->ClearEvacuationCandidate();
+        // Slots have already been recorded so we just need to add it to the
+        // sweeper.
         *data += 1;
       }
     }
@@ -3108,7 +3280,8 @@
 
 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
-      heap_, heap_->isolate()->cancelable_task_manager());
+      heap_, heap_->isolate()->cancelable_task_manager(),
+      &page_parallel_job_semaphore_);
 
   int abandoned_pages = 0;
   intptr_t live_bytes = 0;
@@ -3116,8 +3289,16 @@
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
-  for (NewSpacePage* page : newspace_evacuation_candidates_) {
+
+  const Address age_mark = heap()->new_space()->age_mark();
+  for (Page* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
+    if (!page->NeverEvacuate() &&
+        (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
+        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+        !page->Contains(age_mark)) {
+      EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space());
+    }
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);
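
The promotion check above is cheap arithmetic over page metadata. As a worked
example under assumed numbers: with FLAG_page_promotion_threshold at 70 and,
say, roughly 500 KB of allocatable area per page, a new-space page qualifies
once more than about 350 KB of it is live. With the flag off, the threshold
becomes kAllocatableMemory + kPointerSize, which LiveBytes() can never reach:

    // Sketch: threshold arithmetic, numbers illustrative only.
    int threshold = 70 * Page::kAllocatableMemory / 100;  // ~70% live bytes
    // Disabled case: kAllocatableMemory + kPointerSize is unreachable, so
    // no page ever qualifies for fast (whole-page) evacuation.
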
@@ -3142,16 +3323,15 @@
   delete[] evacuators;
 
   if (FLAG_trace_evacuation) {
-    PrintIsolate(
-        isolate(),
-        "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d "
-        "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
-        "d compaction_speed=%.f\n",
-        isolate()->time_millis_since_init(),
-        FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
-        abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
-        V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
-        live_bytes, compaction_speed);
+    PrintIsolate(isolate(),
+                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+                 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
+                 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
+                 isolate()->time_millis_since_init(),
+                 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
+                 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
+                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+                 live_bytes, compaction_speed);
   }
 }
 
@@ -3169,28 +3349,18 @@
   }
 };
 
-enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
-
-enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
-
-enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
-
-// Sweeps a page. After sweeping the page can be iterated.
-// Slots in live objects pointing into evacuation candidates are updated
-// if requested.
-// Returns the size of the biggest continuous freed memory chunk in bytes.
-template <SweepingMode sweeping_mode,
-          MarkCompactCollector::SweepingParallelism parallelism,
-          SkipListRebuildingMode skip_list_mode,
-          FreeSpaceTreatmentMode free_space_mode>
-static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
+template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
+          MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
+          MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
+          MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
+int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
+                                            ObjectVisitor* v) {
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
   DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
-  DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
-         sweeping_mode == SWEEP_ONLY);
+  DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
 
   Address free_start = p->area_start();
   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3252,7 +3422,6 @@
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
 
-
 void MarkCompactCollector::InvalidateCode(Code* code) {
   if (heap_->incremental_marking()->IsCompacting() &&
       !ShouldSkipEvacuationSlotRecording(code)) {
@@ -3289,9 +3458,8 @@
 }
 #endif  // VERIFY_HEAP
 
-
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
-                                            HeapObjectVisitor* visitor,
+template <class Visitor>
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                                             IterationMode mode) {
 #ifdef VERIFY_HEAP
   VerifyAllBlackObjects(page);
@@ -3349,41 +3517,12 @@
   }
 }
 
-
-void MarkCompactCollector::SweepAbortedPages() {
-  // Second pass on aborted pages.
-  for (Page* p : evacuation_candidates_) {
-    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-      p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
-      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-      switch (space->identity()) {
-        case OLD_SPACE:
-          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, p, nullptr);
-          break;
-        case CODE_SPACE:
-          if (FLAG_zap_code_space) {
-            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                  ZAP_FREE_SPACE>(space, p, nullptr);
-          } else {
-            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                  IGNORE_FREE_SPACE>(space, p, nullptr);
-          }
-          break;
-        default:
-          UNREACHABLE();
-          break;
-      }
-      {
-        base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
-        swept_pages(space->identity())->Add(p);
-      }
-    }
-  }
+void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
+                                                     Page* page) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  swept_list_[space->identity()].Add(page);
 }
 
-
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
   Heap::RelocationLock relocation_lock(heap());
@@ -3404,13 +3543,22 @@
   // slots only handles old space (for unboxed doubles), and thus map space can
   // still contain stale pointers. We only free the chunks after pointer updates
   // to still have access to page headers.
-  heap()->FreeQueuedChunks();
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
-    // After updating all pointers, we can finally sweep the aborted pages,
-    // effectively overriding any forward pointers.
-    SweepAbortedPages();
+
+    for (Page* p : evacuation_candidates_) {
+      // Important: the skip list should be cleared only after the roots were
+      // updated, because root iteration traverses the stack and might have
+      // to find code objects from a non-updated pc pointing into an
+      // evacuation candidate.
+      SkipList* list = p->skip_list();
+      if (list != NULL) list->Clear();
+      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+        sweeper().AddLatePage(p->owner()->identity(), p);
+        p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
+      }
+    }
 
     // EvacuateNewSpaceAndCandidates iterates over new space objects and for
     // ArrayBuffers either re-registers them as live or promotes them. This is
@@ -3422,7 +3570,7 @@
   }
 
 #ifdef VERIFY_HEAP
-  if (FLAG_verify_heap && !sweeping_in_progress_) {
+  if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
     VerifyEvacuation(heap());
   }
 #endif
@@ -3472,12 +3620,8 @@
 
   static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
     MapWord map_word = object->map_word();
-    // Since we only filter invalid slots in old space, the store buffer can
-    // still contain stale pointers in large object and in map spaces. Ignore
-    // these pointers here.
-    DCHECK(map_word.IsForwardingAddress() ||
-           !object->GetHeap()->old_space()->Contains(
-               reinterpret_cast<Address>(address)));
+    // There could still be stale pointers in large object space, map space,
+    // and old space for pages that have been promoted.
     if (map_word.IsForwardingAddress()) {
       // Update the corresponding slot.
       *address = map_word.ToForwardingAddress();
@@ -3493,9 +3637,9 @@
 }
 
 template <PointerDirection direction>
-void UpdatePointersInParallel(Heap* heap) {
+void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
   PageParallelJob<PointerUpdateJobTraits<direction> > job(
-      heap, heap->isolate()->cancelable_task_manager());
+      heap, heap->isolate()->cancelable_task_manager(), semaphore);
   RememberedSet<direction>::IterateMemoryChunks(
       heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
   PointersUpdatingVisitor visitor(heap);
@@ -3525,14 +3669,14 @@
   }
 };
 
-void UpdateToSpacePointersInParallel(Heap* heap) {
+void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
   PageParallelJob<ToSpacePointerUpdateJobTraits> job(
-      heap, heap->isolate()->cancelable_task_manager());
+      heap, heap->isolate()->cancelable_task_manager(), semaphore);
   Address space_start = heap->new_space()->bottom();
   Address space_end = heap->new_space()->top();
   NewSpacePageIterator it(space_start, space_end);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
@@ -3551,36 +3695,17 @@
   {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
-    UpdateToSpacePointersInParallel(heap_);
+    UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
     // Update roots.
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-    UpdatePointersInParallel<OLD_TO_NEW>(heap_);
+    UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
   }
 
   {
     Heap* heap = this->heap();
     TRACE_GC(heap->tracer(),
              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
-    UpdatePointersInParallel<OLD_TO_OLD>(heap_);
-  }
-
-  {
-    TRACE_GC(heap()->tracer(),
-             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
-    for (Page* p : evacuation_candidates_) {
-      DCHECK(p->IsEvacuationCandidate());
-      // Important: skip list should be cleared only after roots were updated
-      // because root iteration traverses the stack and might have to find
-      // code objects from non-updated pc pointing into evacuation candidate.
-      SkipList* list = p->skip_list();
-      if (list != NULL) list->Clear();
-
-      // First pass on aborted pages, fixing up all live objects.
-      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-        p->ClearEvacuationCandidate();
-        VisitLiveObjectsBody(p, &updating_visitor);
-      }
-    }
+    UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
   }
 
   {
@@ -3606,33 +3731,29 @@
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
-  heap()->FreeQueuedChunks();
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }
 
-
-int MarkCompactCollector::SweepInParallel(PagedSpace* space,
-                                          int required_freed_bytes,
-                                          int max_pages) {
+int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
+                                                      int required_freed_bytes,
+                                                      int max_pages) {
   int max_freed = 0;
-  int max_freed_overall = 0;
-  int page_count = 0;
-  for (Page* p : sweeping_list(space)) {
-    max_freed = SweepInParallel(p, space);
-    DCHECK(max_freed >= 0);
-    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+  int pages_freed = 0;
+  Page* page = nullptr;
+  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
+    int freed = ParallelSweepPage(page, heap_->paged_space(identity));
+    pages_freed += 1;
+    DCHECK_GE(freed, 0);
+    max_freed = Max(max_freed, freed);
+    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
       return max_freed;
-    }
-    max_freed_overall = Max(max_freed, max_freed_overall);
-    page_count++;
-    if (max_pages > 0 && page_count >= max_pages) {
-      break;
-    }
+    if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
   }
-  return max_freed_overall;
+  return max_freed;
 }
 
-
-int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
+int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
+                                                     PagedSpace* space) {
   int max_freed = 0;
   if (page->mutex()->TryLock()) {
     // If this page was already swept in the meantime, we can return here.
@@ -3642,18 +3763,18 @@
     }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
     if (space->identity() == OLD_SPACE) {
-      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                        IGNORE_FREE_SPACE>(space, page, NULL);
+      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                           IGNORE_FREE_SPACE>(space, page, NULL);
     } else if (space->identity() == CODE_SPACE) {
-      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
-                        IGNORE_FREE_SPACE>(space, page, NULL);
+      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
+                           IGNORE_FREE_SPACE>(space, page, NULL);
     } else {
-      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                        IGNORE_FREE_SPACE>(space, page, NULL);
+      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                           IGNORE_FREE_SPACE>(space, page, NULL);
     }
     {
-      base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
-      swept_pages(space->identity())->Add(page);
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      swept_list_[space->identity()].Add(page);
     }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
     page->mutex()->Unlock();
@@ -3661,6 +3782,43 @@
   return max_freed;
 }
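
ParallelSweepPage (assembled from the two hunks above) uses the page mutex as
a non-blocking claim, so the main thread and background tasks may race to a
page without ever blocking each other. A minimal sketch of the try-lock
discipline; the wrapper function is illustrative:

    // Sketch: claim a page for sweeping without blocking. Losing the race is
    // fine; the winner sweeps and everyone else reports 0 freed bytes.
    int TryClaimAndSweep(Page* page) {
      int max_freed = 0;
      if (page->mutex()->TryLock()) {
        if (!page->SweepingDone()) {  // re-check: it may be swept by now
          // ... RawSweep(...) runs here and updates max_freed ...
        }
        page->mutex()->Unlock();
      }
      return max_freed;
    }
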
 
+void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
+  DCHECK(!sweeping_in_progress_);
+  PrepareToBeSweptPage(space, page);
+  sweeping_list_[space].push_back(page);
+}
+
+void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
+                                                Page* page) {
+  DCHECK(sweeping_in_progress_);
+  PrepareToBeSweptPage(space, page);
+  late_pages_ = true;
+  AddSweepingPageSafe(space, page);
+}
+
+void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
+                                                         Page* page) {
+  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+  int to_sweep = page->area_size() - page->LiveBytes();
+  heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
+}
+
+Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
+    AllocationSpace space) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  Page* page = nullptr;
+  if (!sweeping_list_[space].empty()) {
+    page = sweeping_list_[space].front();
+    sweeping_list_[space].pop_front();
+  }
+  return page;
+}
+
+void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
+                                                        Page* page) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  sweeping_list_[space].push_back(page);
+}
 
 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearStats();
@@ -3696,8 +3854,9 @@
       // (in the free list) dropped again. Since we only use the flag for
       // testing, this is fine.
       p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-      Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-            IGNORE_FREE_SPACE>(space, p, nullptr);
+      Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
+                        Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
+          space, p, nullptr);
       continue;
     }
 
@@ -3713,10 +3872,7 @@
       unused_page_present = true;
     }
 
-    p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
-    sweeping_list(space).push_back(p);
-    int to_sweep = p->area_size() - p->LiveBytes();
-    space->accounting_stats_.ShrinkSpace(to_sweep);
+    sweeper().AddPage(space->identity(), p);
     will_be_swept++;
   }
 
@@ -3724,8 +3880,6 @@
     PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                  AllocationSpaceName(space->identity()), will_be_swept);
   }
-  std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
-            [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
 }
 
 
@@ -3741,7 +3895,6 @@
 #endif
 
   {
-    sweeping_in_progress_ = true;
     {
       GCTracer::Scope sweep_scope(heap()->tracer(),
                                   GCTracer::Scope::MC_SWEEP_OLD);
@@ -3757,9 +3910,7 @@
                                   GCTracer::Scope::MC_SWEEP_MAP);
       StartSweepSpace(heap()->map_space());
     }
-    if (FLAG_concurrent_sweeping) {
-      StartSweeperThreads();
-    }
+    sweeper().StartSweeping();
   }
 
   // Deallocate unmarked large objects.
@@ -3771,13 +3922,6 @@
   }
 }
 
-
-void MarkCompactCollector::ParallelSweepSpacesComplete() {
-  sweeping_list(heap()->old_space()).clear();
-  sweeping_list(heap()->code_space()).clear();
-  sweeping_list(heap()->map_space()).clear();
-}
-
 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
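
To close out mark-compact.cc: allocation slow paths can now help the sweeper
through its public interface instead of reaching into collector internals. A
hedged usage sketch mirroring SweepAndRefill() above; the wrapper function is
illustrative, the calls it makes are from this patch:

    // Sketch: contribute to concurrent sweeping when allocation fails.
    bool TryRefillByHelpingSweeper(MarkCompactCollector* collector,
                                   CompactionSpace* space) {
      if (!collector->sweeper().sweeping_in_progress()) return false;
      collector->sweeper().ParallelSweepSpace(space->identity(), 0);
      space->RefillFreeList();
      return true;
    }
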
 
 
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index cd207bc..d6adb03 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -5,6 +5,8 @@
 #ifndef V8_HEAP_MARK_COMPACT_H_
 #define V8_HEAP_MARK_COMPACT_H_
 
+#include <deque>
+
 #include "src/base/bits.h"
 #include "src/heap/spaces.h"
 #include "src/heap/store-buffer.h"
@@ -400,6 +402,74 @@
  public:
   class Evacuator;
 
+  class Sweeper {
+   public:
+    class SweeperTask;
+
+    enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
+    enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
+    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+    enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
+
+    typedef std::deque<Page*> SweepingList;
+    typedef List<Page*> SweptList;
+
+    template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
+              SkipListRebuildingMode skip_list_mode,
+              FreeSpaceTreatmentMode free_space_mode>
+    static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
+
+    explicit Sweeper(Heap* heap)
+        : heap_(heap),
+          pending_sweeper_tasks_semaphore_(0),
+          sweeping_in_progress_(false),
+          late_pages_(false),
+          num_sweeping_tasks_(0) {}
+
+    bool sweeping_in_progress() { return sweeping_in_progress_; }
+    bool contains_late_pages() { return late_pages_; }
+
+    void AddPage(AllocationSpace space, Page* page);
+    void AddLatePage(AllocationSpace space, Page* page);
+
+    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
+                           int max_pages = 0);
+    int ParallelSweepPage(Page* page, PagedSpace* space);
+
+    void StartSweeping();
+    void StartSweepingHelper(AllocationSpace space_to_start);
+    void EnsureCompleted();
+    bool IsSweepingCompleted();
+    void SweepOrWaitUntilSweepingCompleted(Page* page);
+
+    void AddSweptPageSafe(PagedSpace* space, Page* page);
+    Page* GetSweptPageSafe(PagedSpace* space);
+
+   private:
+    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
+
+    template <typename Callback>
+    void ForAllSweepingSpaces(Callback callback) {
+      for (int i = 0; i < kAllocationSpaces; i++) {
+        callback(static_cast<AllocationSpace>(i));
+      }
+    }
+
+    Page* GetSweepingPageSafe(AllocationSpace space);
+    void AddSweepingPageSafe(AllocationSpace space, Page* page);
+
+    void PrepareToBeSweptPage(AllocationSpace space, Page* page);
+
+    Heap* heap_;
+    base::Semaphore pending_sweeper_tasks_semaphore_;
+    base::Mutex mutex_;
+    SweptList swept_list_[kAllocationSpaces];
+    SweepingList sweeping_list_[kAllocationSpaces];
+    bool sweeping_in_progress_;
+    bool late_pages_;
+    int num_sweeping_tasks_;
+  };
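
A usage sketch of the lifecycle this class encodes, assuming pages are added
with AddPage only while sweeping is stopped and with AddLatePage only while
it is running:

    // Sketch: the Sweeper lifecycle as the collector drives it.
    Sweeper sweeper(heap);
    sweeper.AddPage(OLD_SPACE, page);           // queue while stopped
    sweeper.StartSweeping();                    // sort lists, spawn tasks
    sweeper.AddLatePage(OLD_SPACE, late_page);  // thread-safe while running
    sweeper.ParallelSweepSpace(OLD_SPACE, 0);   // main thread helps out
    sweeper.EnsureCompleted();                  // join tasks; lists are empty
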
+
   enum IterationMode {
     kKeepMarking,
     kClearMarkbits,
@@ -451,8 +521,6 @@
   CodeFlusher* code_flusher() { return code_flusher_; }
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
 
-  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
-
 #ifdef VERIFY_HEAP
   void VerifyValidStoreAndSlotsBufferEntries();
   void VerifyMarkbitsAreClean();
@@ -490,38 +558,19 @@
 
   MarkingParity marking_parity() { return marking_parity_; }
 
-  // Concurrent and parallel sweeping support. If required_freed_bytes was set
-  // to a value larger than 0, then sweeping returns after a block of at least
-  // required_freed_bytes was freed. If required_freed_bytes was set to zero
-  // then the whole given space is swept. It returns the size of the maximum
-  // continuous freed memory chunk.
-  int SweepInParallel(PagedSpace* space, int required_freed_bytes,
-                      int max_pages = 0);
-
-  // Sweeps a given page concurrently to the sweeper threads. It returns the
-  // size of the maximum continuous freed memory chunk.
-  int SweepInParallel(Page* page, PagedSpace* space);
-
   // Ensures that sweeping is finished.
   //
   // Note: Can only be called safely from main thread.
   void EnsureSweepingCompleted();
 
-  void SweepOrWaitUntilSweepingCompleted(Page* page);
-
   // Help out in sweeping the corresponding space and refill memory that has
   // been regained.
   //
   // Note: Thread-safe.
   void SweepAndRefill(CompactionSpace* space);
 
-  // If sweeper threads are not active this method will return true. If
-  // this is a latency issue we should be smarter here. Otherwise, it will
-  // return true if the sweeper threads are done processing the pages.
-  bool IsSweepingCompleted();
-
   // Checks if sweeping is in progress right now on any space.
-  bool sweeping_in_progress() { return sweeping_in_progress_; }
+  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }
 
   void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
 
@@ -562,62 +611,39 @@
   // address range.
   void RemoveObjectSlots(Address start_slot, Address end_slot);
 
-  base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
-  List<Page*>* swept_pages(AllocationSpace id) {
-    switch (id) {
-      case OLD_SPACE:
-        return &swept_old_space_pages_;
-      case CODE_SPACE:
-        return &swept_code_space_pages_;
-      case MAP_SPACE:
-        return &swept_map_space_pages_;
-      default:
-        UNREACHABLE();
-    }
-    return nullptr;
+  Sweeper& sweeper() { return sweeper_; }
+
+  std::vector<std::pair<void*, void*>>& wrappers_to_trace() {
+    return wrappers_to_trace_;
   }
 
+  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+
+  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }
+
+  void TracePossibleWrapper(JSObject* js_object);
+
+  void RegisterExternallyReferencedObject(Object** object);
+
  private:
+  class EvacuateNewSpacePageVisitor;
   class EvacuateNewSpaceVisitor;
   class EvacuateOldSpaceVisitor;
+  class EvacuateRecordOnlyVisitor;
   class EvacuateVisitorBase;
   class HeapObjectVisitor;
-  class SweeperTask;
-
-  typedef std::vector<Page*> SweepingList;
 
   explicit MarkCompactCollector(Heap* heap);
 
   bool WillBeDeoptimized(Code* code);
   void ClearInvalidRememberedSetSlots();
 
-  void StartSweeperThreads();
-
   void ComputeEvacuationHeuristics(int area_size,
                                    int* target_fragmentation_percent,
                                    int* max_evacuated_bytes);
 
-#ifdef DEBUG
-  enum CollectorState {
-    IDLE,
-    PREPARE_GC,
-    MARK_LIVE_OBJECTS,
-    SWEEP_SPACES,
-    ENCODE_FORWARDING_ADDRESSES,
-    UPDATE_POINTERS,
-    RELOCATE_OBJECTS
-  };
-
-  // The current stage of the collector.
-  CollectorState state_;
-#endif
-
-  MarkingParity marking_parity_;
-
-  bool was_marked_incrementally_;
-
-  bool evacuation_;
-
   // Finishes GC, performs heap verification if enabled.
   void Finish();
 
@@ -678,8 +704,8 @@
   // or overflowed in the heap.  This respects references only considered in
   // the final atomic marking pause including the following:
   //    - Processing of objects reachable through Harmony WeakMaps.
-  //    - Objects reachable due to host application logic like object groups
-  //      or implicit references' groups.
+  //    - Objects reachable due to host application logic like object groups,
+  //      implicit references' groups, or embedder heap tracing.
   void ProcessEphemeralMarking(ObjectVisitor* visitor,
                                bool only_process_harmony_weak_collections);
 
@@ -761,8 +787,6 @@
   //          evacuation.
   //
 
-  inline SweepingList& sweeping_list(Space* space);
-
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
   // regions to each space's free list.
@@ -782,25 +806,20 @@
 
   // Iterates through all live objects on a page using marking information.
   // Returns whether all objects have successfully been visited.
-  bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
+  template <class Visitor>
+  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                         IterationMode mode);
 
   void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
 
   void RecomputeLiveBytes(MemoryChunk* page);
 
-  void SweepAbortedPages();
-
   void ReleaseEvacuationCandidates();
 
   // Starts sweeping of a space by contributing on the main thread and setting
   // up other pages for sweeping.
   void StartSweepSpace(PagedSpace* space);
 
-  // Finalizes the parallel sweeping phase. Marks all the pages that were
-  // swept in parallel.
-  void ParallelSweepSpacesComplete();
-
 #ifdef DEBUG
   friend class MarkObjectVisitor;
   static void VisitObject(HeapObject* obj);
@@ -810,39 +829,52 @@
 #endif
 
   Heap* heap_;
-  base::VirtualMemory* marking_deque_memory_;
-  size_t marking_deque_memory_committed_;
-  MarkingDeque marking_deque_;
-  CodeFlusher* code_flusher_;
-  bool have_code_to_deoptimize_;
 
-  List<Page*> evacuation_candidates_;
-  List<NewSpacePage*> newspace_evacuation_candidates_;
+  base::Semaphore page_parallel_job_semaphore_;
 
-  base::Mutex swept_pages_mutex_;
-  List<Page*> swept_old_space_pages_;
-  List<Page*> swept_code_space_pages_;
-  List<Page*> swept_map_space_pages_;
+#ifdef DEBUG
+  enum CollectorState {
+    IDLE,
+    PREPARE_GC,
+    MARK_LIVE_OBJECTS,
+    SWEEP_SPACES,
+    ENCODE_FORWARDING_ADDRESSES,
+    UPDATE_POINTERS,
+    RELOCATE_OBJECTS
+  };
 
-  SweepingList sweeping_list_old_space_;
-  SweepingList sweeping_list_code_space_;
-  SweepingList sweeping_list_map_space_;
+  // The current stage of the collector.
+  CollectorState state_;
+#endif
+
+  MarkingParity marking_parity_;
+
+  bool was_marked_incrementally_;
+
+  bool evacuation_;
 
   // True if we are collecting slots to perform evacuation from evacuation
   // candidates.
   bool compacting_;
 
-  // True if concurrent or parallel sweeping is currently in progress.
-  bool sweeping_in_progress_;
-
-  // Semaphore used to synchronize sweeper tasks.
-  base::Semaphore pending_sweeper_tasks_semaphore_;
-
-  // Semaphore used to synchronize compaction tasks.
-  base::Semaphore pending_compaction_tasks_semaphore_;
-
   bool black_allocation_;
 
+  bool have_code_to_deoptimize_;
+
+  base::VirtualMemory* marking_deque_memory_;
+  size_t marking_deque_memory_committed_;
+  MarkingDeque marking_deque_;
+  std::vector<std::pair<void*, void*>> wrappers_to_trace_;
+
+  CodeFlusher* code_flusher_;
+
+  EmbedderHeapTracer* embedder_heap_tracer_;
+
+  List<Page*> evacuation_candidates_;
+  List<Page*> newspace_evacuation_candidates_;
+
+  Sweeper sweeper_;
+
   friend class Heap;
   friend class StoreBuffer;
 };
diff --git a/src/heap/object-stats.cc b/src/heap/object-stats.cc
index c1566ab..0198c6b 100644
--- a/src/heap/object-stats.cc
+++ b/src/heap/object-stats.cc
@@ -187,14 +187,9 @@
                                                       fixed_array_size);
   }
   if (map_obj->has_code_cache()) {
-    CodeCache* cache = CodeCache::cast(map_obj->code_cache());
-    heap->object_stats_->RecordFixedArraySubTypeStats(
-        MAP_CODE_CACHE_SUB_TYPE, cache->default_cache()->Size());
-    if (!cache->normal_type_cache()->IsUndefined()) {
-      heap->object_stats_->RecordFixedArraySubTypeStats(
-          MAP_CODE_CACHE_SUB_TYPE,
-          FixedArray::cast(cache->normal_type_cache())->Size());
-    }
+    FixedArray* cache = FixedArray::cast(map_obj->code_cache());
+    heap->object_stats_->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
+                                                      cache->Size());
   }
   VisitBase(kVisitMap, map, obj);
 }
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index c415713..4373451 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -90,6 +90,11 @@
 
   table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
                                           kVisitJSObjectGeneric>();
+
+  // We do not use the specialized API object visitor for new space.
+  table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSApiObject,
+                                          kVisitJSApiObjectGeneric>();
+
   table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
                                           kVisitStructGeneric>();
 }
@@ -200,6 +205,9 @@
   table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
                                           kVisitJSObjectGeneric>();
 
+  table_.template RegisterSpecializations<JSApiObjectVisitor, kVisitJSApiObject,
+                                          kVisitJSApiObjectGeneric>();
+
   table_.template RegisterSpecializations<StructObjectVisitor, kVisitStruct,
                                           kVisitStructGeneric>();
 }
@@ -265,8 +273,8 @@
   // when they might be keeping a Context alive, or when the heap is about
   // to be serialized.
   if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
-      !target->is_call_stub() && (heap->isolate()->serializer_enabled() ||
-                                  target->ic_age() != heap->global_ic_age())) {
+      (heap->isolate()->serializer_enabled() ||
+       target->ic_age() != heap->global_ic_age())) {
     ICUtility::Clear(heap->isolate(), rinfo->pc(),
                      rinfo->host()->constant_pool());
     target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -625,8 +633,7 @@
   }
 
   // We never flush code for API functions.
-  Object* function_data = shared_info->function_data();
-  if (function_data->IsFunctionTemplateInfo()) {
+  if (shared_info->IsApiFunction()) {
     return false;
   }
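
Marking registers the specialized visitor for the new kVisitJSApiObject ids,
while the new-space table above deliberately falls back to the plain
JSObjectVisitor. A minimal sketch of what full-GC dispatch amounts to; it
mirrors the JSApiObjectVisitor defined in objects-visiting.h below, using the
call names from this patch:

    // Sketch: trace-then-visit decoration for API objects during marking.
    static void VisitJSApiObject(Map* map, HeapObject* object) {
      Heap* heap = object->GetHeap();
      if (heap->UsingEmbedderHeapTracer()) {
        heap->TracePossibleWrapper(JSObject::cast(object));  // queue the pair
      }
      JSObjectVisitor::Visit(map, object);  // then the normal body visit
    }
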
 
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index 0b857dc..dfde574 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -111,7 +111,6 @@
     case JS_ARRAY_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
-    case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
     case JS_TYPED_ARRAY_TYPE:
     case JS_DATA_VIEW_TYPE:
@@ -123,6 +122,10 @@
     case JS_BOUND_FUNCTION_TYPE:
       return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
                                  instance_size, has_unboxed_fields);
+    case JS_API_OBJECT_TYPE:
+    case JS_SPECIAL_API_OBJECT_TYPE:
+      return GetVisitorIdForSize(kVisitJSApiObject, kVisitJSApiObjectGeneric,
+                                 instance_size, has_unboxed_fields);
 
     case JS_FUNCTION_TYPE:
       return kVisitJSFunction;
@@ -282,7 +285,7 @@
   }
 
   static Object* WeakNext(Context* context) {
-    return context->get(Context::NEXT_CONTEXT_LINK);
+    return context->next_context_link();
   }
 
   static int WeakNextOffset() {
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
index 1fe8a17..4be40cd 100644
--- a/src/heap/objects-visiting.h
+++ b/src/heap/objects-visiting.h
@@ -58,6 +58,15 @@
   V(JSObject8)             \
   V(JSObject9)             \
   V(JSObjectGeneric)       \
+  V(JSApiObject2)          \
+  V(JSApiObject3)          \
+  V(JSApiObject4)          \
+  V(JSApiObject5)          \
+  V(JSApiObject6)          \
+  V(JSApiObject7)          \
+  V(JSApiObject8)          \
+  V(JSApiObject9)          \
+  V(JSApiObjectGeneric)    \
   V(Struct2)               \
   V(Struct3)               \
   V(Struct4)               \
@@ -96,9 +105,10 @@
 #define VISITOR_ID_ENUM_DECL(id) kVisit##id,
     VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
 #undef VISITOR_ID_ENUM_DECL
-    kVisitorIdCount,
+        kVisitorIdCount,
     kVisitDataObject = kVisitDataObject2,
     kVisitJSObject = kVisitJSObject2,
+    kVisitJSApiObject = kVisitJSApiObject2,
     kVisitStruct = kVisitStruct2,
   };
 
@@ -119,11 +129,12 @@
                                        int object_size,
                                        bool has_unboxed_fields) {
     DCHECK((base == kVisitDataObject) || (base == kVisitStruct) ||
-           (base == kVisitJSObject));
+           (base == kVisitJSObject) || (base == kVisitJSApiObject));
     DCHECK(IsAligned(object_size, kPointerSize));
     DCHECK(Heap::kMinObjectSizeInWords * kPointerSize <= object_size);
     DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
-    DCHECK(!has_unboxed_fields || (base == kVisitJSObject));
+    DCHECK(!has_unboxed_fields || (base == kVisitJSObject) ||
+           (base == kVisitJSApiObject));
 
     if (has_unboxed_fields) return generic;
 
@@ -400,6 +411,28 @@
   typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void>
       JSObjectVisitor;
 
+  class JSApiObjectVisitor : AllStatic {
+   public:
+    template <int size>
+    static inline void VisitSpecialized(Map* map, HeapObject* object) {
+      TracePossibleWrapper(object);
+      JSObjectVisitor::template VisitSpecialized<size>(map, object);
+    }
+
+    INLINE(static void Visit(Map* map, HeapObject* object)) {
+      TracePossibleWrapper(object);
+      JSObjectVisitor::Visit(map, object);
+    }
+
+   private:
+    INLINE(static void TracePossibleWrapper(HeapObject* object)) {
+      if (object->GetHeap()->UsingEmbedderHeapTracer()) {
+        DCHECK(object->IsJSObject());
+        object->GetHeap()->TracePossibleWrapper(JSObject::cast(object));
+      }
+    }
+  };
+
   typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, void>
       StructObjectVisitor;
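
The RegisterSpecializations calls in this file fill a table of visitor
callbacks: one slot per fixed object size (JSApiObject2 through JSApiObject9
in the id list below) plus a generic fallback. A minimal compiling sketch of
that registration idiom, with purely illustrative names and sizes, using
C++17 for brevity (this is not V8's actual dispatch-table code):

#include <cstdio>

using Callback = void (*)(int size_in_words);

template <int kWords>
void VisitSpecialized(int) { std::printf("fixed %d-word visit\n", kWords); }
void VisitGeneric(int words) { std::printf("generic %d-word visit\n", words); }

struct DispatchTable {
  Callback entries[9];  // slots for sizes 2..9 plus the generic slot

  template <int kWords = 2>
  void RegisterSpecializations() {
    entries[kWords - 2] = &VisitSpecialized<kWords>;
    if constexpr (kWords < 9) {
      RegisterSpecializations<kWords + 1>();
    } else {
      entries[8] = &VisitGeneric;  // objects too large for a fixed slot
    }
  }
};

int main() {
  DispatchTable table;
  table.RegisterSpecializations();
  table.entries[1](3);   // dispatches to VisitSpecialized<3>
  table.entries[8](42);  // dispatches to VisitGeneric
}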
 
diff --git a/src/heap/page-parallel-job.h b/src/heap/page-parallel-job.h
index 720e288..440c440 100644
--- a/src/heap/page-parallel-job.h
+++ b/src/heap/page-parallel-job.h
@@ -33,13 +33,20 @@
 template <typename JobTraits>
 class PageParallelJob {
  public:
-  PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager)
+  // PageParallelJob cannot dynamically create a semaphore because of a bug in
+  // glibc. See http://crbug.com/609249 and
+  // https://sourceware.org/bugzilla/show_bug.cgi?id=12674.
+  // The caller must provide a semaphore with value 0 and ensure that
+  // the lifetime of the semaphore is the same as the lifetime of the Isolate.
+  // It is guaranteed that the semaphore value will be 0 after Run() call.
+  PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager,
+                  base::Semaphore* semaphore)
       : heap_(heap),
         cancelable_task_manager_(cancelable_task_manager),
         items_(nullptr),
         num_items_(0),
         num_tasks_(0),
-        pending_tasks_(new base::Semaphore(0)) {}
+        pending_tasks_(semaphore) {}
 
   ~PageParallelJob() {
     Item* item = items_;
@@ -48,7 +55,6 @@
       delete item;
       item = next;
     }
-    delete pending_tasks_;
   }
 
   void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
@@ -121,7 +127,7 @@
     Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
         : chunk(chunk), state(kAvailable), data(data), next(next) {}
     MemoryChunk* chunk;
-    AtomicValue<ProcessingState> state;
+    base::AtomicValue<ProcessingState> state;
     typename JobTraits::PerPageData data;
     Item* next;
   };
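
The constructor contract above (caller-owned semaphore, value 0 before and
after Run()) sidesteps destroying a semaphore that a worker might still be
signaling. A minimal sketch of that ownership split, with a mutex/condvar
semaphore standing in for base::Semaphore and std::thread standing in for V8
background tasks:

#include <condition_variable>
#include <mutex>
#include <thread>

class Semaphore {  // stand-in for base::Semaphore
 public:
  explicit Semaphore(int count) : count_(count) {}
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_;
};

class ParallelJob {
 public:
  // The job only borrows the semaphore; its owner (the Isolate in V8) keeps
  // it alive, so no destruction can race with a late Signal().
  explicit ParallelJob(Semaphore* pending) : pending_(pending) {}

  void Run(int num_tasks) {
    for (int i = 0; i < num_tasks; i++) {
      std::thread([this] { /* process pages */ pending_->Signal(); }).detach();
    }
    for (int i = 0; i < num_tasks; i++) pending_->Wait();
    // The semaphore value is 0 again here, as the header comment guarantees.
  }

 private:
  Semaphore* pending_;
};

int main() {
  Semaphore sem(0);  // lives as long as the "isolate"
  ParallelJob(&sem).Run(4);
}
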
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
index 3f532ea..456d8a4 100644
--- a/src/heap/scavenger.cc
+++ b/src/heap/scavenger.cc
@@ -78,6 +78,10 @@
     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                    kVisitJSObject, kVisitJSObjectGeneric>();
 
+    table_
+        .RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+                                 kVisitJSApiObject, kVisitJSApiObjectGeneric>();
+
     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                    kVisitStruct, kVisitStructGeneric>();
   }
@@ -462,6 +466,7 @@
 void ScavengeVisitor::ScavengePointer(Object** p) {
   Object* object = *p;
   if (!heap_->InNewSpace(object)) return;
+
   Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                             reinterpret_cast<HeapObject*>(object));
 }
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 135498f..f9e40bb 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -56,8 +56,8 @@
 
 HeapObject* SemiSpaceIterator::Next() {
   while (current_ != limit_) {
-    if (NewSpacePage::IsAtEnd(current_)) {
-      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+    if (Page::IsAlignedToPageSize(current_)) {
+      Page* page = Page::FromAllocationAreaAddress(current_);
       page = page->next_page();
       DCHECK(!page->is_anchor());
       current_ = page->area_start();
@@ -80,9 +80,9 @@
 // NewSpacePageIterator
 
 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
-      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
+    : prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(Page::FromAddress(space->ToSpaceStart())),
+      last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}
 
 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
     : prev_page_(space->anchor()),
@@ -90,17 +90,16 @@
       last_page_(prev_page_->prev_page()) {}
 
 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
-      next_page_(NewSpacePage::FromAddress(start)),
-      last_page_(NewSpacePage::FromLimit(limit)) {
+    : prev_page_(Page::FromAddress(start)->prev_page()),
+      next_page_(Page::FromAddress(start)),
+      last_page_(Page::FromAllocationAreaAddress(limit)) {
   SemiSpace::AssertValidRange(start, limit);
 }
 
 
 bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
 
-
-NewSpacePage* NewSpacePageIterator::next() {
+Page* NewSpacePageIterator::next() {
   DCHECK(has_next());
   prev_page_ = next_page_;
   next_page_ = next_page_->next_page();
@@ -251,16 +250,15 @@
   return static_cast<AllocationSpace>(Smi::cast(object_)->value());
 }
 
-NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
-                                       Executability executable,
-                                       SemiSpace* owner) {
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+                       SemiSpace* owner) {
   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
   bool in_to_space = (owner->id() != kFromSpace);
   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                              : MemoryChunk::IN_FROM_SPACE);
   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  Page* page = static_cast<Page*>(chunk);
   heap->incremental_marking()->SetNewSpacePageFlags(page);
   return page;
 }
@@ -268,6 +266,7 @@
 // --------------------------------------------------------------------------
 // PagedSpace
 
+template <Page::InitializationMode mode>
 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
@@ -280,11 +279,26 @@
 
   // Make sure that categories are initialized before freeing the area.
   page->InitializeFreeListCategories();
-  owner->Free(page->area_start(), page->area_size());
+  // If we do not free the memory, the whole page is effectively accounted
+  // for as allocated memory that cannot be used for further allocations.
+  if (mode == kFreeMemory) {
+    owner->Free(page->area_start(), page->area_size());
+  }
 
   return page;
 }
 
+Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+  DCHECK(old_page->InNewSpace());
+  old_page->set_owner(new_owner);
+  old_page->SetFlags(0, ~0);
+  new_owner->AccountCommitted(old_page->size());
+  Page* new_page = Page::Initialize<kDoNotFreeMemory>(
+      old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
+  new_page->InsertAfter(new_owner->anchor()->prev_page());
+  return new_page;
+}
+
 void Page::InitializeFreeListCategories() {
   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
@@ -325,14 +339,14 @@
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }
 
 bool PagedSpace::Contains(Object* o) {
   if (!o->IsHeapObject()) return false;
   Page* p = Page::FromAddress(HeapObject::cast(o)->address());
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }
 
@@ -382,8 +396,10 @@
 }
 
 void Page::ClearEvacuationCandidate() {
-  DCHECK_NULL(old_to_old_slots_);
-  DCHECK_NULL(typed_old_to_old_slots_);
+  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
+    DCHECK_NULL(old_to_old_slots_);
+    DCHECK_NULL(typed_old_to_old_slots_);
+  }
   ClearFlag(EVACUATION_CANDIDATE);
   InitializeFreeListCategories();
 }
@@ -436,16 +452,6 @@
   return nullptr;
 }
 
-void Page::set_next_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_next_chunk(page);
-}
-
-void Page::set_prev_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_prev_chunk(page);
-}
-
 Page* FreeListCategory::page() {
   return Page::FromAddress(reinterpret_cast<Address>(this));
 }
@@ -668,15 +674,19 @@
   return AllocateRaw(size_in_bytes, alignment);
 }
 
-
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, Space* owner) {
+  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+    FATAL("Code page is too large.");
+  }
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
 }
 
 
 intptr_t LargeObjectSpace::Available() {
-  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
+  return ObjectSizeFor(heap()->memory_allocator()->Available());
 }
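
The switch from FromLimit/FromAllocationTop to FromAllocationAreaAddress in
this file is pure pointer arithmetic: an allocation top or limit may point one
past the last byte of its page, so the page is recovered by backing up one
word before masking. A small self-contained sketch (the page size here is an
illustrative value, not V8's constant):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // illustrative
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kPointerSize = sizeof(void*);

uintptr_t PageFromAddress(uintptr_t a) { return a & ~kPageAlignmentMask; }

// For top/limit values, back up one word first: the address may equal
// page_start + kPageSize, i.e. the first byte of the next page.
uintptr_t PageFromAllocationAreaAddress(uintptr_t a) {
  return PageFromAddress(a - kPointerSize);
}

int main() {
  uintptr_t page = 7 * kPageSize;
  uintptr_t top = page + kPageSize;  // allocation area exactly full
  assert(PageFromAddress(top) != page);                // masks to next page
  assert(PageFromAllocationAreaAddress(top) == page);  // correct page
}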
 
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index a0a3752..e517c45 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -6,11 +6,13 @@
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
 #include "src/snapshot/snapshot.h"
+#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
@@ -62,8 +64,10 @@
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
-  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
-      cur_page);
+  cur_page->heap()
+      ->mark_compact_collector()
+      ->sweeper()
+      .SweepOrWaitUntilSweepingCompleted(cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
   DCHECK(cur_page->SweepingDone());
@@ -222,7 +226,7 @@
   *allocated = current.size;
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!isolate_->memory_allocator()->CommitExecutableMemory(
+  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
     ReleaseBlock(&current);
@@ -233,7 +237,8 @@
 
 
 bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+  return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
+                                                            EXECUTABLE);
 }
 
 
@@ -294,15 +299,17 @@
 
 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
+      code_range_(nullptr),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
-      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+      highest_ever_allocated_(reinterpret_cast<void*>(0)),
+      unmapper_(this) {}
 
-
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
+                            intptr_t code_range_size) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   DCHECK_GE(capacity_, capacity_executable_);
@@ -310,21 +317,84 @@
   size_ = 0;
   size_executable_ = 0;
 
+  code_range_ = new CodeRange(isolate_);
+  if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
+
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  for (MemoryChunk* chunk : chunk_pool_) {
+  unmapper()->WaitUntilCompleted();
+
+  MemoryChunk* chunk = nullptr;
+  while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
     FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
                NOT_EXECUTABLE);
   }
+
   // Check that spaces were torn down before MemoryAllocator.
   DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
+
+  if (last_chunk_.IsReserved()) {
+    last_chunk_.Release();
+  }
+
+  delete code_range_;
+  code_range_ = nullptr;
+}
+
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    unmapper_->PerformFreeMemoryOnQueuedChunks();
+    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+  }
+
+  Unmapper* unmapper_;
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  if (FLAG_concurrent_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+    concurrent_unmapping_tasks_active_++;
+  } else {
+    PerformFreeMemoryOnQueuedChunks();
+  }
+}
+
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+  bool waited = false;
+  while (concurrent_unmapping_tasks_active_ > 0) {
+    pending_unmapping_tasks_semaphore_.Wait();
+    concurrent_unmapping_tasks_active_--;
+    waited = true;
+  }
+  return waited;
+}
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+  MemoryChunk* chunk = nullptr;
+  // Regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+    allocator_->PerformFreeMemory(chunk);
+    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+  }
+  // Non-regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+    allocator_->PerformFreeMemory(chunk);
+  }
 }
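
The Unmapper above is a small producer/consumer: mutators enqueue dead chunks
under a lock, a background task drains the queue, and TearDown waits for all
tasks before freeing what remains. A rough sketch of that shape, with
std::thread standing in for v8::Task and delete standing in for unmapping:

#include <list>
#include <mutex>
#include <thread>
#include <vector>

struct Chunk {};

class Unmapper {
 public:
  void AddMemoryChunkSafe(Chunk* chunk) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push_back(chunk);
  }
  void FreeQueuedChunks() {
    // V8 posts a short-running background task; a thread plays that role.
    tasks_.emplace_back(&Unmapper::PerformFreeMemoryOnQueuedChunks, this);
  }
  void WaitUntilCompleted() {
    for (std::thread& t : tasks_) t.join();
    tasks_.clear();
  }

 private:
  Chunk* GetMemoryChunkSafe() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (queue_.empty()) return nullptr;
    Chunk* chunk = queue_.front();
    queue_.pop_front();
    return chunk;
  }
  void PerformFreeMemoryOnQueuedChunks() {
    while (Chunk* chunk = GetMemoryChunkSafe()) delete chunk;
  }

  std::mutex mutex_;
  std::list<Chunk*> queue_;
  std::vector<std::thread> tasks_;
};

int main() {
  Unmapper unmapper;
  unmapper.AddMemoryChunkSafe(new Chunk);
  unmapper.FreeQueuedChunks();
  unmapper.WaitUntilCompleted();
}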
 
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
@@ -342,12 +412,10 @@
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   // Code which is part of the code-range does not have its own VirtualMemory.
-  DCHECK(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->contains(
-             static_cast<Address>(reservation->address())));
-  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
-         reservation->size() <= Page::kPageSize);
+  DCHECK(code_range() == NULL ||
+         !code_range()->contains(static_cast<Address>(reservation->address())));
+  DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
+         !code_range()->valid() || reservation->size() <= Page::kPageSize);
 
   reservation->Release();
 }
@@ -356,20 +424,19 @@
 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  if (isolate_->code_range() != NULL &&
-      isolate_->code_range()->contains(static_cast<Address>(base))) {
+  if (code_range() != NULL &&
+      code_range()->contains(static_cast<Address>(base))) {
     DCHECK(executable == EXECUTABLE);
-    isolate_->code_range()->FreeRawMemory(base, size);
+    code_range()->FreeRawMemory(base, size);
   } else {
-    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-           !isolate_->code_range()->valid());
+    DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
+           !code_range()->valid());
     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     DCHECK(result);
   }
 }
 
-
 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                               base::VirtualMemory* controller) {
   base::VirtualMemory reservation(size, alignment);
@@ -382,7 +449,6 @@
   return base;
 }
 
-
 Address MemoryAllocator::AllocateAlignedMemory(
     size_t reserve_size, size_t commit_size, size_t alignment,
     Executability executable, base::VirtualMemory* controller) {
@@ -415,20 +481,12 @@
   return base;
 }
 
-
-void Page::InitializeAsAnchor(PagedSpace* owner) {
-  set_owner(owner);
-  set_prev_page(this);
-  set_next_page(this);
-}
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
-  set_owner(semi_space);
+void Page::InitializeAsAnchor(Space* space) {
+  set_owner(space);
   set_next_chunk(this);
   set_prev_chunk(this);
-  // Flags marks this invalid page as not being in new-space.
-  // All real new-space pages will be in new-space.
   SetFlags(0, ~0);
+  SetFlag(ANCHOR);
 }
 
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
@@ -496,19 +554,19 @@
     if (reservation_.IsReserved()) {
       Executability executable =
           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
-                                                               executable)) {
+      if (!heap()->memory_allocator()->CommitMemory(start, length,
+                                                    executable)) {
         return false;
       }
     } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
+      CodeRange* code_range = heap_->memory_allocator()->code_range();
       DCHECK(code_range != NULL && code_range->valid() &&
              IsFlagSet(IS_EXECUTABLE));
       if (!code_range->CommitRawMemory(start, length)) return false;
     }
 
     if (Heap::ShouldZapGarbage()) {
-      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+      heap_->memory_allocator()->ZapBlock(start, length);
     }
   } else if (commit_size < committed_size) {
     DCHECK(commit_size > 0);
@@ -518,7 +576,7 @@
     if (reservation_.IsReserved()) {
       if (!reservation_.Uncommit(start, length)) return false;
     } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
+      CodeRange* code_range = heap_->memory_allocator()->code_range();
       DCHECK(code_range != NULL && code_range->valid() &&
              IsFlagSet(IS_EXECUTABLE));
       if (!code_range->UncommitRawMemory(start, length)) return false;
@@ -614,13 +672,13 @@
 #ifdef V8_TARGET_ARCH_MIPS64
     // Use code range only for large object space on mips64 to keep address
     // range within 256-MB memory region.
-    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
+    if (code_range() != NULL && code_range()->valid() &&
         reserve_area_size > CodePageAreaSize()) {
 #else
-    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+    if (code_range() != NULL && code_range()->valid()) {
 #endif
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
-                                                       &chunk_size);
+      base =
+          code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
       DCHECK(
           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
@@ -674,6 +732,23 @@
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
 
+  // We cannot use the last chunk in the address space because we would
+  // overflow when comparing top and limit if this chunk is used for a
+  // linear allocation area.
+  if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
+    CHECK(!last_chunk_.IsReserved());
+    last_chunk_.TakeControl(&reservation);
+    UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
+                  last_chunk_.size());
+    size_.Increment(-static_cast<intptr_t>(chunk_size));
+    if (executable == EXECUTABLE) {
+      size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
+    }
+    CHECK(last_chunk_.IsReserved());
+    return AllocateChunk(reserve_area_size, commit_area_size, executable,
+                         owner);
+  }
+
   return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                  executable, owner, &reservation);
 }
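
The check above relies on unsigned wraparound: if a chunk ends exactly at the
top of the address space, base + chunk_size wraps to 0, and every top < limit
bounds check on a linear allocation area in that chunk fails. A short
demonstration (addresses illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t base = ~uintptr_t{0} - 0xFFFF + 1;  // last 64 KiB of the space
  uintptr_t limit = base + 0x10000;             // wraps around to 0
  std::printf("limit = %p, top < limit: %d\n",
              reinterpret_cast<void*>(limit), base < limit);  // prints 0
}
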
@@ -684,20 +759,6 @@
   available_in_free_list_ = 0;
 }
 
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
-                                              Space* owner,
-                                              Executability executable) {
-  MemoryChunk* chunk =
-      AllocateChunk(object_size, object_size, executable, owner);
-  if (chunk == NULL) return NULL;
-  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
-    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
-    FATAL("Code page is too large.");
-  }
-  return LargePage::Initialize(isolate_->heap(), chunk);
-}
-
-
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
@@ -735,36 +796,52 @@
   chunk->ReleaseAllocatedMemory();
 
   base::VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
+  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
   } else {
-    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    if (reservation->IsReserved()) {
+      FreeMemory(reservation, chunk->executable());
+    } else {
+      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    }
   }
 }
 
-template <MemoryAllocator::AllocationMode mode>
+template <MemoryAllocator::FreeMode mode>
 void MemoryAllocator::Free(MemoryChunk* chunk) {
-  if (mode == kRegular) {
-    PreFreeMemory(chunk);
-    PerformFreeMemory(chunk);
-  } else {
-    DCHECK_EQ(mode, kPooled);
-    FreePooled(chunk);
+  switch (mode) {
+    case kFull:
+      PreFreeMemory(chunk);
+      PerformFreeMemory(chunk);
+      break;
+    case kPooledAndQueue:
+      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+      chunk->SetFlag(MemoryChunk::POOLED);
+    // Fall through to kPreFreeAndQueue.
+    case kPreFreeAndQueue:
+      PreFreeMemory(chunk);
+      // The chunks added to this queue will be freed by a concurrent thread.
+      unmapper()->AddMemoryChunkSafe(chunk);
+      break;
+    default:
+      UNREACHABLE();
   }
 }
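
Because the free mode is a template parameter, the switch above is resolved at
compile time, and the kPooledAndQueue case intentionally falls through into
kPreFreeAndQueue. A condensed sketch of the same dispatch (printfs stand in
for the real memory operations):

#include <cstdio>

enum FreeMode { kFull, kPooledAndQueue, kPreFreeAndQueue };

template <FreeMode mode>
void Free(const char* chunk) {
  switch (mode) {
    case kFull:
      std::printf("%s: pre-free and release immediately\n", chunk);
      break;
    case kPooledAndQueue:
      std::printf("%s: tag as POOLED\n", chunk);
    // Fall through to kPreFreeAndQueue.
    case kPreFreeAndQueue:
      std::printf("%s: pre-free, queue for the unmapper\n", chunk);
      break;
  }
}

int main() {
  Free<kFull>("a");
  Free<kPooledAndQueue>("b");  // prints both the POOLED and the queue lines
}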
 
-template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
     MemoryChunk* chunk);
 
-template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
     MemoryChunk* chunk);
 
-template <typename PageType, MemoryAllocator::AllocationMode mode,
-          typename SpaceType>
-PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
-                                        Executability executable) {
+template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
+Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+                                    Executability executable) {
   MemoryChunk* chunk = nullptr;
-  if (mode == kPooled) {
+  if (alloc_mode == kPooled) {
     DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
@@ -773,22 +850,32 @@
     chunk = AllocateChunk(size, size, executable, owner);
   }
   if (chunk == nullptr) return nullptr;
-  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
 }
 
-template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
-                                             PagedSpace>(intptr_t, PagedSpace*,
-                                                         Executability);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+    intptr_t size, PagedSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
 
-template NewSpacePage* MemoryAllocator::AllocatePage<
-    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
-                                                       Executability);
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+                                              LargeObjectSpace* owner,
+                                              Executability executable) {
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+  if (chunk == nullptr) return nullptr;
+  return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
+}
 
 template <typename SpaceType>
 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
-  if (chunk_pool_.is_empty()) return nullptr;
+  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+  if (chunk == nullptr) return nullptr;
   const int size = MemoryChunk::kPageSize;
-  MemoryChunk* chunk = chunk_pool_.RemoveLast();
   const Address start = reinterpret_cast<Address>(chunk);
   const Address area_start = start + MemoryChunk::kObjectStartOffset;
   const Address area_end = start + size;
@@ -802,18 +889,6 @@
   return chunk;
 }
 
-void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
-  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
-  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
-  chunk_pool_.Add(chunk);
-  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
-  if (chunk->executable() == EXECUTABLE) {
-    size_executable_.Increment(-chunk_size);
-  }
-  size_.Increment(-chunk_size);
-  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
-}
-
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;
@@ -890,10 +965,7 @@
 void MemoryAllocator::ReportStatistics() {
   intptr_t size = Size();
   float pct = static_cast<float>(capacity_ - size) / capacity_;
-  PrintF("  capacity: %" V8_PTR_PREFIX
-         "d"
-         ", used: %" V8_PTR_PREFIX
-         "d"
+  PrintF("  capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
          ", available: %%%d\n\n",
          capacity_, size, static_cast<int>(pct * 100));
 }
@@ -959,12 +1031,16 @@
 // MemoryChunk implementation
 
 void MemoryChunk::ReleaseAllocatedMemory() {
-  delete skip_list_;
-  skip_list_ = nullptr;
-  delete mutex_;
-  mutex_ = nullptr;
-  ReleaseOldToNewSlots();
-  ReleaseOldToOldSlots();
+  if (skip_list_ != nullptr) {
+    delete skip_list_;
+    skip_list_ = nullptr;
+  }
+  if (mutex_ != nullptr) {
+    delete mutex_;
+    mutex_ = nullptr;
+  }
+  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+  if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
 }
 
 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
@@ -1029,13 +1105,11 @@
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : Space(heap, space, executable), free_list_(this) {
+    : Space(heap, space, executable), anchor_(this), free_list_(this) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
   allocation_info_.Reset(nullptr, nullptr);
-
-  anchor_.InitializeAsAnchor(this);
 }
 
 
@@ -1048,7 +1122,7 @@
 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
-    heap()->isolate()->memory_allocator()->Free(iterator.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
@@ -1063,17 +1137,14 @@
     return;
   }
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  List<Page*>* swept_pages = collector->swept_pages(identity());
   intptr_t added = 0;
   {
-    base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
-    for (int i = swept_pages->length() - 1; i >= 0; --i) {
-      Page* p = (*swept_pages)[i];
+    Page* p = nullptr;
+    while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
       // Only during compaction pages can actually change ownership. This is
       // safe because there exists no other competing action on the page links
       // during compaction.
       if (is_local() && (p->owner() != this)) {
-        if (added > kCompactionMemoryWanted) break;
         base::LockGuard<base::Mutex> guard(
             reinterpret_cast<PagedSpace*>(p->owner())->mutex());
         p->Unlink();
@@ -1082,7 +1153,7 @@
       }
       added += RelinkFreeListCategories(p);
       added += p->wasted_memory();
-      swept_pages->Remove(i);
+      if (is_local() && (added > kCompactionMemoryWanted)) break;
     }
   }
   accounting_stats_.IncreaseCapacity(added);
@@ -1163,30 +1234,16 @@
 }
 
 
-bool PagedSpace::CanExpand(size_t size) {
-  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
-         Capacity() <= heap()->MaxOldGenerationSize());
-
-  // Are we going to exceed capacity for this space? At this point we can be
-  // way over the maximum size because of AlwaysAllocate scopes and large
-  // objects.
-  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;
-
-  return true;
-}
-
-
 bool PagedSpace::Expand() {
-  intptr_t size = AreaSize();
+  int size = AreaSize();
   if (snapshotable() && !HasPages()) {
     size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
   }
 
-  if (!CanExpand(size)) return false;
+  if (!heap()->CanExpandOldGeneration(size)) return false;
 
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
-      size, this, executable());
-  if (p == NULL) return false;
+  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
+  if (p == nullptr) return false;
 
   AccountCommitted(static_cast<intptr_t>(p->size()));
 
@@ -1244,7 +1301,7 @@
   free_list_.EvictFreeListItems(page);
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
-  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
     allocation_info_.Reset(nullptr, nullptr);
   }
 
@@ -1255,7 +1312,7 @@
   }
 
   AccountUncommitted(static_cast<intptr_t>(page->size()));
-  heap()->QueueMemoryChunkForFree(page);
+  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
 
   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
@@ -1273,7 +1330,7 @@
   while (page_iterator.has_next()) {
     Page* page = page_iterator.next();
     CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top())) {
+    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->SweepingDone());
@@ -1492,14 +1549,14 @@
 
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
-  DCHECK(!NewSpacePage::IsAtStart(top));
+  DCHECK(!Page::IsAtObjectStart(top));
   if (!to_space_.AdvancePage()) {
     // No more pages left to advance.
     return false;
   }
 
   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
   }
@@ -1526,7 +1583,7 @@
   int filler_size = Heap::GetFillToAlign(old_top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
 
-  if (old_top + aligned_size_in_bytes >= high) {
+  if (old_top + aligned_size_in_bytes > high) {
     // Not enough room in the page, try to allocate a new one.
     if (!AddFreshPage()) {
       return false;
@@ -1537,10 +1594,9 @@
     old_top = allocation_info_.top();
     high = to_space_.page_high();
     filler_size = Heap::GetFillToAlign(old_top, alignment);
-    aligned_size_in_bytes = size_in_bytes + filler_size;
   }
 
-  DCHECK(old_top + aligned_size_in_bytes < high);
+  DCHECK(old_top + aligned_size_in_bytes <= high);
 
   if (allocation_info_.limit() < high) {
     // Either the limit has been lowered because linear allocation was disabled
@@ -1626,9 +1682,9 @@
   CHECK_EQ(current, to_space_.space_start());
 
   while (current != top()) {
-    if (!NewSpacePage::IsAtEnd(current)) {
+    if (!Page::IsAlignedToPageSize(current)) {
       // The allocation pointer should not be in the middle of an object.
-      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
             current < top());
 
       HeapObject* object = HeapObject::FromAddress(current);
@@ -1654,7 +1710,7 @@
       current += size;
     } else {
       // At end of page, switch to next page.
-      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
       // Next page should be valid.
       CHECK(!page->is_anchor());
       current = page->area_start();
@@ -1690,15 +1746,12 @@
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  NewSpacePage* current = anchor();
+  Page* current = anchor();
   const int num_pages = current_capacity_ / Page::kPageSize;
   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->isolate()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(current, pages_added);
       return false;
@@ -1720,13 +1773,14 @@
   DCHECK(is_committed());
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
         it.next());
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
   AccountUncommitted(current_capacity_);
   committed_ = false;
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   return true;
 }
 
@@ -1746,21 +1800,18 @@
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
   const int delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-  int delta_pages = delta / NewSpacePage::kPageSize;
-  NewSpacePage* last_page = anchor()->prev_page();
+  const int delta_pages = delta / Page::kPageSize;
+  Page* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->isolate()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(last_page, pages_added);
       return false;
@@ -1768,8 +1819,7 @@
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that was set on the old page.
-    new_page->SetFlags(last_page->GetFlags(),
-                       NewSpacePage::kCopyOnFlipFlagsMask);
+    new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
     last_page = new_page;
   }
   AccountCommitted(static_cast<intptr_t>(delta));
@@ -1777,9 +1827,9 @@
   return true;
 }
 
-void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
-  NewSpacePage* new_last_page = nullptr;
-  NewSpacePage* last_page = start;
+void SemiSpace::RewindPages(Page* start, int num_pages) {
+  Page* new_last_page = nullptr;
+  Page* last_page = start;
   while (num_pages > 0) {
     DCHECK_NE(last_page, anchor());
     new_last_page = last_page->prev_page();
@@ -1791,25 +1841,26 @@
 }
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
     const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-    int delta_pages = delta / NewSpacePage::kPageSize;
-    NewSpacePage* new_last_page;
-    NewSpacePage* last_page;
+    int delta_pages = delta / Page::kPageSize;
+    Page* new_last_page;
+    Page* last_page;
     while (delta_pages > 0) {
       last_page = anchor()->prev_page();
       new_last_page = last_page->prev_page();
       new_last_page->set_next_page(anchor());
       anchor()->set_prev_page(new_last_page);
-      heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
           last_page);
       delta_pages--;
     }
     AccountUncommitted(static_cast<intptr_t>(delta));
+    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
   current_capacity_ = new_capacity;
   return true;
@@ -1817,13 +1868,12 @@
 
 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes when we swap.
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
 
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     page->set_owner(this);
     page->SetFlags(flags, mask);
     if (id_ == kToSpace) {
@@ -1846,6 +1896,22 @@
   current_page_ = anchor_.next_page();
 }
 
+bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
+  // TODO(mlippautz): We do not have to get a new page here when the semispace
+  // is uncommitted later on.
+  Page* new_page = heap()->memory_allocator()->AllocatePage(
+      Page::kAllocatableMemory, this, executable());
+  if (new_page == nullptr) return false;
+  Bitmap::Clear(new_page);
+  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
+  new_page->set_next_page(old_page->next_page());
+  new_page->set_prev_page(old_page->prev_page());
+  old_page->next_page()->set_prev_page(new_page);
+  old_page->prev_page()->set_next_page(new_page);
+  heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
+                               ClearRecordedSlots::kNo);
+  return true;
+}
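
ReplaceWithEmptyPage splices the fresh page into the doubly linked page list
in place of the old one with four pointer updates. The same relinking in
isolation, with an anchor node as in the semispace lists above:

#include <cassert>

struct PageNode {
  PageNode* prev;
  PageNode* next;
};

void Replace(PageNode* old_page, PageNode* new_page) {
  new_page->next = old_page->next;
  new_page->prev = old_page->prev;
  old_page->next->prev = new_page;
  old_page->prev->next = new_page;
}

int main() {
  PageNode anchor, a, b;
  anchor.next = &a; a.prev = &anchor;
  a.next = &b;      b.prev = &a;
  b.next = &anchor; anchor.prev = &b;

  PageNode fresh;
  Replace(&a, &fresh);
  assert(anchor.next == &fresh && fresh.next == &b && b.prev == &fresh);
}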
 
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   // We won't be swapping semispaces without data in them.
@@ -1863,13 +1929,13 @@
   std::swap(from->anchor_, to->anchor_);
   std::swap(from->current_page_, to->current_page_);
 
-  to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
   from->FixPagesFlags(0, 0);
 }
 
 
 void SemiSpace::set_age_mark(Address mark) {
-  DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
+  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
   NewSpacePageIterator it(space_start(), mark);
@@ -1886,10 +1952,10 @@
 #ifdef VERIFY_HEAP
 void SemiSpace::Verify() {
   bool is_from_space = (id_ == kFromSpace);
-  NewSpacePage* page = anchor_.next_page();
-  CHECK(anchor_.semi_space() == this);
+  Page* page = anchor_.next_page();
+  CHECK(anchor_.owner() == this);
   while (page != &anchor_) {
-    CHECK_EQ(page->semi_space(), this);
+    CHECK_EQ(page->owner(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                         : MemoryChunk::IN_TO_SPACE));
@@ -1917,10 +1983,10 @@
 #ifdef DEBUG
 void SemiSpace::AssertValidRange(Address start, Address end) {
   // Addresses belong to same semi-space
-  NewSpacePage* page = NewSpacePage::FromLimit(start);
-  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
-  SemiSpace* space = page->semi_space();
-  CHECK_EQ(space, end_page->semi_space());
+  Page* page = Page::FromAllocationAreaAddress(start);
+  Page* end_page = Page::FromAllocationAreaAddress(end);
+  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
+  CHECK_EQ(space, end_page->owner());
   // Start address is before end address, either on same page,
   // or end address is on a later page in the linked list of
   // semi-space pages.
@@ -2087,9 +2153,7 @@
 #ifdef DEBUG
   if (FLAG_heap_stats) {
     float pct = static_cast<float>(Available()) / TotalCapacity();
-    PrintF("  capacity: %" V8_PTR_PREFIX
-           "d"
-           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+    PrintF("  capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
            TotalCapacity(), Available(), static_cast<int>(pct * 100));
     PrintF("\n  Object Histogram:\n");
     for (int i = 0; i <= LAST_TYPE; i++) {
@@ -2382,7 +2446,6 @@
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == nullptr) return nullptr;
-  owner_->AllocationStep(new_node->address(), size_in_bytes);
 
   int bytes_left = new_node_size - size_in_bytes;
   DCHECK(bytes_left >= 0);
@@ -2409,7 +2472,8 @@
     // Keep the linear allocation area empty if requested to do so, just
     // return area back to the free list instead.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
-    DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + size_in_bytes);
   } else if (bytes_left > kThreshold &&
              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
              FLAG_incremental_marking) {
@@ -2421,13 +2485,16 @@
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + size_in_bytes + linear_size);
-  } else if (bytes_left > 0) {
+  } else {
+    DCHECK(bytes_left >= 0);
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + new_node_size);
   }
 
+  owner_->AllocationStep(new_node->address(), size_in_bytes);
+
   return new_node;
 }
 
@@ -2596,7 +2663,7 @@
 void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
 
-  if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
+  if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
@@ -2648,8 +2715,8 @@
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
-    int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
-                                               size_in_bytes, kMaxPagesToSweep);
+    int max_freed = collector->sweeper().ParallelSweepSpace(
+        identity(), size_in_bytes, kMaxPagesToSweep);
     RefillFreeList();
     if (max_freed >= size_in_bytes) {
       object = free_list_.Allocate(size_in_bytes);
@@ -2815,11 +2882,8 @@
 
 void PagedSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX
-         "d"
-         ", waste: %" V8_PTR_PREFIX
-         "d"
-         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+  PrintF("  capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
+         ", available: %" V8PRIdPTR ", %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
@@ -2892,9 +2956,9 @@
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
 
     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+    heap()->memory_allocator()->PerformAllocationCallback(
         space, kAllocationActionFree, page->size());
-    heap()->isolate()->memory_allocator()->Free(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
 }
@@ -2908,7 +2972,7 @@
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
@@ -2977,7 +3041,7 @@
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
-    DCHECK(page->is_valid());
+    DCHECK(LargePage::IsValid(page));
     if (page->Contains(a)) {
       return page;
     }
@@ -3037,7 +3101,7 @@
                           static_cast<uint32_t>(key));
       }
 
-      heap()->QueueMemoryChunkForFree(page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
     }
   }
 }
@@ -3115,7 +3179,7 @@
 
 
 void LargeObjectSpace::ReportStatistics() {
-  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
+  PrintF("  size: %" V8PRIdPTR "\n", size_);
   int num_objects = 0;
   ClearHistograms(heap()->isolate());
   LargeObjectIterator it(this);
@@ -3126,7 +3190,7 @@
 
   PrintF(
       "  number of objects %d, "
-      "size of objects %" V8_PTR_PREFIX "d\n",
+      "size of objects %" V8PRIdPTR "\n",
       num_objects, objects_size_);
   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
 }
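
The V8_PTR_PREFIX to V8PRIdPTR conversions in this file follow the
<cinttypes> convention: a macro expands to the correct length modifier for the
platform's pointer-sized integer, so one format string serves both 32- and
64-bit builds. The standard-library equivalent:

#include <cinttypes>
#include <cstdio>

int main() {
  intptr_t capacity = 123456789;
  std::printf("  capacity: %" PRIdPTR "\n", capacity);
}
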
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 93a81cc..67e9aae 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -5,8 +5,10 @@
 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_
 
+#include <list>
+
 #include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
@@ -27,7 +29,6 @@
 class Isolate;
 class MemoryAllocator;
 class MemoryChunk;
-class NewSpacePage;
 class Page;
 class PagedSpace;
 class SemiSpace;
@@ -419,6 +420,10 @@
     // to grey transition is performed in the value.
     HAS_PROGRESS_BAR,
 
+    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
+    // from new to old space during evacuation.
+    PAGE_NEW_OLD_PROMOTION,
+
     // A black page has all mark bits set to 1 (black). A black page currently
     // cannot be iterated because it is not swept. Moreover live bytes are also
     // not updated.
@@ -437,10 +442,17 @@
     // still has to be performed.
     PRE_FREED,
 
+    // |POOLED|: When actually freeing this chunk, only uncommit and do not
+    // give up the reservation, as the chunk will be reused later.
+    POOLED,
+
     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     //   has been aborted and needs special handling by the sweeper.
     COMPACTION_WAS_ABORTED,
 
+    // |ANCHOR|: Flag is set if page is an anchor.
+    ANCHOR,
+
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -548,7 +560,7 @@
     if (mark == nullptr) return;
     // Need to subtract one from the mark because when a chunk is full the
     // top points to the next address after the chunk, which effectively belongs
-    // to another chunk. See the comment to Page::FromAllocationTop.
+    // to another chunk. See the comment to Page::FromAllocationAreaAddress.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
     intptr_t old_mark = 0;
@@ -558,9 +570,9 @@
              !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
   }
 
-  Address address() { return reinterpret_cast<Address>(this); }
+  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
 
-  bool is_valid() { return address() != NULL; }
+  Address address() { return reinterpret_cast<Address>(this); }
 
   base::Mutex* mutex() { return mutex_; }
 
@@ -574,7 +586,7 @@
     return addr >= area_start() && addr <= area_end();
   }
 
-  AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
     return concurrent_sweeping_;
   }
 
@@ -788,20 +800,20 @@
 
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  AtomicValue<intptr_t> high_water_mark_;
+  base::AtomicValue<intptr_t> high_water_mark_;
 
   base::Mutex* mutex_;
 
-  AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
+  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
 
   // PagedSpace free-list statistics.
-  AtomicNumber<intptr_t> available_in_free_list_;
-  AtomicNumber<intptr_t> wasted_memory_;
+  base::AtomicNumber<intptr_t> available_in_free_list_;
+  base::AtomicNumber<intptr_t> wasted_memory_;
 
   // next_chunk_ holds a pointer of type MemoryChunk
-  AtomicValue<MemoryChunk*> next_chunk_;
+  base::AtomicValue<MemoryChunk*> next_chunk_;
   // prev_chunk_ holds a pointer of type MemoryChunk
-  AtomicValue<MemoryChunk*> prev_chunk_;
+  base::AtomicValue<MemoryChunk*> prev_chunk_;
 
   FreeListCategory categories_[kNumberOfCategories];
 
@@ -817,59 +829,15 @@
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
-//   Page* p = Page::FromAllocationTop(top);
+//   Page* p = Page::FromAllocationAreaAddress(top);
 class Page : public MemoryChunk {
  public:
-  // Returns the page containing a given address. The address ranges
-  // from [page_addr .. page_addr + kPageSize[
-  // This only works if the object is in fact in a page.  See also MemoryChunk::
-  // FromAddress() and FromAnyAddress().
-  INLINE(static Page* FromAddress(Address a)) {
-    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
-  }
+  static const intptr_t kCopyAllFlags = ~0;
 
-  // Only works for addresses in pointer spaces, not code space.
-  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
-
-  // Returns the page containing an allocation top. Because an allocation
-  // top address can be the upper bound of the page, we need to subtract
-  // it with kPointerSize first. The address ranges from
-  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
-  INLINE(static Page* FromAllocationTop(Address top)) {
-    Page* p = FromAddress(top - kPointerSize);
-    return p;
-  }
-
-  // Returns the next page in the chain of pages owned by a space.
-  inline Page* next_page() {
-    DCHECK(next_chunk()->owner() == owner());
-    return static_cast<Page*>(next_chunk());
-  }
-  inline Page* prev_page() {
-    DCHECK(prev_chunk()->owner() == owner());
-    return static_cast<Page*>(prev_chunk());
-  }
-  inline void set_next_page(Page* page);
-  inline void set_prev_page(Page* page);
-
-  // Checks whether an address is page aligned.
-  static bool IsAlignedToPageSize(Address a) {
-    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
-  }
-
-  // Returns the offset of a given address to this page.
-  INLINE(int Offset(Address a)) {
-    int offset = static_cast<int>(a - address());
-    return offset;
-  }
-
-  // Returns the address for a given offset to the this page.
-  Address OffsetToAddress(int offset) {
-    DCHECK_PAGE_OFFSET(offset);
-    return address() + offset;
-  }
-
-  // ---------------------------------------------------------------------
+  // Page flags copied from from-space to to-space when flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
 
   // Maximum object size that gets allocated into regular pages. Objects larger
   // than that size are allocated in large object space and are never moved in
@@ -880,12 +848,71 @@
   // short living objects >256K.
   static const int kMaxRegularHeapObjectSize = 600 * KB;
 
-  inline void ClearGCFields();
+  static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
 
-  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
-                                 Executability executable, PagedSpace* owner);
+  // Returns the page containing a given address. The address ranges
+  // from [page_addr .. page_addr + kPageSize[. This only works if the object
+  // is in fact in a page.
+  static Page* FromAddress(Address addr) {
+    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
+  }
 
-  void InitializeAsAnchor(PagedSpace* owner);
+  // Returns the page containing the address provided. The address can
+  // potentially point right after the page. To also be safe for tagged
+  // values we subtract one word. The valid address ranges from
+  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
+  static Page* FromAllocationAreaAddress(Address address) {
+    return Page::FromAddress(address - kPointerSize);
+  }
+
+  // Checks if address1 and address2 are on the same new space page.
+  static bool OnSamePage(Address address1, Address address2) {
+    return Page::FromAddress(address1) == Page::FromAddress(address2);
+  }
+
+  // Checks whether an address is page aligned.
+  static bool IsAlignedToPageSize(Address addr) {
+    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
+  }
+
+  static bool IsAtObjectStart(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
+           kObjectStartOffset;
+  }
+
+  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
+
+  // Create a Page object that is only used as anchor for the doubly-linked
+  // list of real pages.
+  explicit Page(Space* owner) { InitializeAsAnchor(owner); }
+
+  inline void MarkNeverAllocateForTesting();
+  inline void MarkEvacuationCandidate();
+  inline void ClearEvacuationCandidate();
+
+  Page* next_page() { return static_cast<Page*>(next_chunk()); }
+  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
+  void set_next_page(Page* page) { set_next_chunk(page); }
+  void set_prev_page(Page* page) { set_prev_chunk(page); }
+
+  template <typename Callback>
+  inline void ForAllFreeListCategories(Callback callback) {
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      callback(&categories_[i]);
+    }
+  }
+
+  // Returns the offset of a given address to this page.
+  inline int Offset(Address a) {
+    int offset = static_cast<int>(a - address());
+    return offset;
+  }
+
+  // Returns the address for a given offset into this page.
+  Address OffsetToAddress(int offset) {
+    DCHECK_PAGE_OFFSET(offset);
+    return address() + offset;
+  }
 
   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
   // progress. In particular, when we know that right before this call a
@@ -907,42 +934,39 @@
                             available_in_free_list());
   }
 
-  template <typename Callback>
-  inline void ForAllFreeListCategories(Callback callback) {
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      callback(&categories_[i]);
-    }
-  }
-
   FreeListCategory* free_list_category(FreeListCategoryType type) {
     return &categories_[type];
   }
 
-#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
-  type name() { return name##_.Value(); }                \
-  void set_##name(type name) { name##_.SetValue(name); } \
-  void add_##name(type name) { name##_.Increment(name); }
+  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
 
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
-
-#undef FRAGMENTATION_STATS_ACCESSORS
+  intptr_t wasted_memory() { return wasted_memory_.Value(); }
+  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
+  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
+  void add_available_in_free_list(intptr_t available) {
+    available_in_free_list_.Increment(available);
+  }
 
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
 
-  inline void MarkNeverAllocateForTesting();
-  inline void MarkEvacuationCandidate();
-  inline void ClearEvacuationCandidate();
-
  private:
+  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
+
+  template <InitializationMode mode = kFreeMemory>
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, PagedSpace* owner);
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, SemiSpace* owner);
+
   inline void InitializeFreeListCategories();
 
+  void InitializeAsAnchor(Space* owner);
+
   friend class MemoryAllocator;
 };
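
For context, a minimal sketch (plain C++, not V8 code) of the masking arithmetic behind Page::FromAddress and Page::FromAllocationAreaAddress above; the kPageSize value here is illustrative, not V8's constant:

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = 512 * 1024;             // illustrative size
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

uintptr_t PageFromAddress(uintptr_t addr) {
  return addr & ~kPageAlignmentMask;  // round down to the page start
}

uintptr_t PageFromAllocationAreaAddress(uintptr_t addr) {
  // An allocation top/limit can sit exactly at the page end, which is byte 0
  // of the *next* page; stepping back one word keeps us on the right page.
  return PageFromAddress(addr - sizeof(void*));
}

int main() {
  uintptr_t page = 3 * kPageSize;
  assert(PageFromAddress(page + 100) == page);
  assert(PageFromAllocationAreaAddress(page + kPageSize) == page);
}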
 
-
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
@@ -960,7 +984,8 @@
   static const int kMaxCodePageSize = 512 * MB;
 
  private:
-  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
+                                      Executability executable, Space* owner);
 
   friend class MemoryAllocator;
 };
@@ -1036,11 +1061,6 @@
     }
   }
 
-#ifdef DEBUG
-  virtual void Print() = 0;
-#endif
-
- protected:
   void AccountCommitted(intptr_t bytes) {
     DCHECK_GE(bytes, 0);
     committed_ += bytes;
@@ -1055,6 +1075,11 @@
     DCHECK_GE(committed_, 0);
   }
 
+#ifdef DEBUG
+  virtual void Print() = 0;
+#endif
+
+ protected:
   v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
   bool allocation_observers_paused_;
 
@@ -1242,45 +1267,113 @@
 // A space acquires chunks of memory from the operating system. The memory
 // allocator allocates and deallocates pages for the paged heap spaces and large
 // pages for large object space.
-//
-// Each space has to manage it's own pages.
-//
 class MemoryAllocator {
  public:
+  // Unmapper takes care of concurrently unmapping and uncommitting memory
+  // chunks.
+  class Unmapper {
+   public:
+    class UnmapFreeMemoryTask;
+
+    explicit Unmapper(MemoryAllocator* allocator)
+        : allocator_(allocator),
+          pending_unmapping_tasks_semaphore_(0),
+          concurrent_unmapping_tasks_active_(0) {}
+
+    void AddMemoryChunkSafe(MemoryChunk* chunk) {
+      if ((chunk->size() == Page::kPageSize) &&
+          (chunk->executable() != EXECUTABLE)) {
+        AddMemoryChunkSafe<kRegular>(chunk);
+      } else {
+        AddMemoryChunkSafe<kNonRegular>(chunk);
+      }
+    }
+
+    MemoryChunk* TryGetPooledMemoryChunkSafe() {
+      // Procedure:
+      // (1) Try to get a chunk that was declared as pooled and already has
+      // been uncommitted.
+      // (2) Try to steal any memory chunk of kPageSize that would've been
+      // unmapped.
+      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+      if (chunk == nullptr) {
+        chunk = GetMemoryChunkSafe<kRegular>();
+        if (chunk != nullptr) {
+          // For stolen chunks we need to manually free any allocated memory.
+          chunk->ReleaseAllocatedMemory();
+        }
+      }
+      return chunk;
+    }
+
+    void FreeQueuedChunks();
+    bool WaitUntilCompleted();
+
+   private:
+    enum ChunkQueueType {
+      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
+                    // can thus be used for stealing.
+      kNonRegular,  // Large chunks and executable chunks.
+    kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
+      kNumberOfChunkQueues,
+    };
+
+    template <ChunkQueueType type>
+    void AddMemoryChunkSafe(MemoryChunk* chunk) {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      chunks_[type].push_back(chunk);
+    }
+
+    template <ChunkQueueType type>
+    MemoryChunk* GetMemoryChunkSafe() {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      if (chunks_[type].empty()) return nullptr;
+      MemoryChunk* chunk = chunks_[type].front();
+      chunks_[type].pop_front();
+      return chunk;
+    }
+
+    void PerformFreeMemoryOnQueuedChunks();
+
+    base::Mutex mutex_;
+    MemoryAllocator* allocator_;
+    std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    base::Semaphore pending_unmapping_tasks_semaphore_;
+    intptr_t concurrent_unmapping_tasks_active_;
+
+    friend class MemoryAllocator;
+  };
+
   enum AllocationMode {
     kRegular,
     kPooled,
   };
+  enum FreeMode {
+    kFull,
+    kPreFreeAndQueue,
+    kPooledAndQueue,
+  };
 
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
-  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
+  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
+             intptr_t code_range_size);
 
   void TearDown();
 
-  // Allocates either Page or NewSpacePage from the allocator. AllocationMode
-  // is used to indicate whether pooled allocation, which only works for
-  // MemoryChunk::kPageSize, should be tried first.
-  template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
+  // Allocates a Page from the allocator. AllocationMode is used to indicate
+  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
+  // should be tried first.
+  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
             typename SpaceType>
-  PageType* AllocatePage(intptr_t size, SpaceType* owner,
-                         Executability executable);
+  Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
 
-  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
+  LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
                                Executability executable);
 
-  // PreFree logically frees the object, i.e., it takes care of the size
-  // bookkeeping and calls the allocation callback.
-  void PreFreeMemory(MemoryChunk* chunk);
-
-  // FreeMemory can be called concurrently when PreFree was executed before.
-  void PerformFreeMemory(MemoryChunk* chunk);
-
-  // Free is a wrapper method. For kRegular AllocationMode it  calls PreFree and
-  // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
-  template <MemoryAllocator::AllocationMode mode = kRegular>
+  template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);
 
   // Returns allocated spaces in bytes.
@@ -1385,34 +1478,43 @@
                                               Address start, size_t commit_size,
                                               size_t reserved_size);
 
+  CodeRange* code_range() { return code_range_; }
+  Unmapper* unmapper() { return &unmapper_; }
+
  private:
+  // PreFree logically frees the object, i.e., it takes care of the size
+  // bookkeeping and calls the allocation callback.
+  void PreFreeMemory(MemoryChunk* chunk);
+
+  // FreeMemory can be called concurrently when PreFree was executed before.
+  void PerformFreeMemory(MemoryChunk* chunk);
+
   // See AllocatePage for public interface. Note that currently we only support
   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);
 
-  // Free that chunk into the pool.
-  void FreePooled(MemoryChunk* chunk);
-
   Isolate* isolate_;
 
+  CodeRange* code_range_;
+
   // Maximum space size in bytes.
   intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
   intptr_t capacity_executable_;
 
   // Allocated space size in bytes.
-  AtomicNumber<intptr_t> size_;
+  base::AtomicNumber<intptr_t> size_;
   // Allocated executable space size in bytes.
-  AtomicNumber<intptr_t> size_executable_;
+  base::AtomicNumber<intptr_t> size_executable_;
 
   // We keep the lowest and highest addresses allocated as a quick way
   // of determining that pointers are outside the heap. The estimate is
   // conservative, i.e. not all addresses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  AtomicValue<void*> lowest_ever_allocated_;
-  AtomicValue<void*> highest_ever_allocated_;
+  base::AtomicValue<void*> lowest_ever_allocated_;
+  base::AtomicValue<void*> highest_ever_allocated_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1447,7 +1549,10 @@
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
 
-  List<MemoryChunk*> chunk_pool_;
+  base::VirtualMemory last_chunk_;
+  Unmapper unmapper_;
+
+  friend class TestCodeRangeScope;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
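
The Unmapper above is essentially a set of per-type chunk queues behind one mutex, plus an opportunistic "steal" path. A hedged stand-alone sketch of that pattern (Chunk and the class name are stand-ins, not V8 types):

#include <list>
#include <mutex>

struct Chunk { bool regular; };  // stand-in for MemoryChunk

class UnmapperSketch {
 public:
  enum QueueType { kRegular, kNonRegular, kNumberOfQueues };

  void Add(Chunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    queues_[chunk->regular ? kRegular : kNonRegular].push_back(chunk);
  }

  // Reuse a regular chunk instead of letting it be unmapped, mirroring
  // TryGetPooledMemoryChunkSafe() stealing from the kRegular queue.
  Chunk* TrySteal() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (queues_[kRegular].empty()) return nullptr;
    Chunk* chunk = queues_[kRegular].front();
    queues_[kRegular].pop_front();
    return chunk;
  }

 private:
  std::mutex mutex_;
  std::list<Chunk*> queues_[kNumberOfQueues];
};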
@@ -1572,7 +1677,8 @@
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+    return (Page::FromAllocationAreaAddress(top_) ==
+            Page::FromAllocationAreaAddress(limit_)) &&
            (top_ <= limit_);
   }
 #endif
@@ -1872,7 +1978,7 @@
   FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
 
   PagedSpace* owner_;
-  AtomicNumber<intptr_t> wasted_bytes_;
+  base::AtomicNumber<intptr_t> wasted_bytes_;
   FreeListCategory* categories_[kNumberOfCategories];
 
   friend class FreeListCategory;
@@ -2152,12 +2258,6 @@
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
 
-  // This function tries to steal size_in_bytes memory from the sweeper threads
-  // free-lists. If it does not succeed stealing enough memory, it will wait
-  // for the sweeper threads to finish sweeping.
-  // It returns true when sweeping is completed and false otherwise.
-  bool EnsureSweeperProgress(intptr_t size_in_bytes);
-
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
@@ -2283,83 +2383,8 @@
   const char* name_;
 };
 
-
 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
-
-class NewSpacePage : public MemoryChunk {
- public:
-  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
-                                         Executability executable,
-                                         SemiSpace* owner);
-
-  static bool IsAtStart(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
-           kObjectStartOffset;
-  }
-
-  static bool IsAtEnd(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
-  }
-
-  // Finds the NewSpacePage containing the given address.
-  static inline NewSpacePage* FromAddress(Address address_in_page) {
-    Address page_start =
-        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
-                                  ~Page::kPageAlignmentMask);
-    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    return page;
-  }
-
-  // Find the page for a limit address. A limit address is either an address
-  // inside a page, or the address right after the last byte of a page.
-  static inline NewSpacePage* FromLimit(Address address_limit) {
-    return NewSpacePage::FromAddress(address_limit - 1);
-  }
-
-  // Checks if address1 and address2 are on the same new space page.
-  static inline bool OnSamePage(Address address1, Address address2) {
-    return NewSpacePage::FromAddress(address1) ==
-           NewSpacePage::FromAddress(address2);
-  }
-
-  inline NewSpacePage* next_page() {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
-  inline NewSpacePage* prev_page() {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
-  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
-  bool is_anchor() { return !this->InNewSpace(); }
-
- private:
-  // GC related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-  // Create a NewSpacePage object that is only used as anchor
-  // for the doubly-linked list of real pages.
-  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
-
-  // Intialize a fake NewSpacePage used as sentinel at the ends
-  // of a doubly-linked list of real NewSpacePages.
-  // Only uses the prev/next links, and sets flags to not be in new-space.
-  void InitializeAsAnchor(SemiSpace* owner);
-
-  friend class SemiSpace;
-  friend class SemiSpaceIterator;
-};
-
-
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
@@ -2408,8 +2433,8 @@
     return anchor_.next_page()->area_start();
   }
 
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
+  Page* first_page() { return anchor_.next_page(); }
+  Page* current_page() { return current_page_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
@@ -2421,7 +2446,7 @@
   Address page_high() { return current_page_->area_end(); }
 
   bool AdvancePage() {
-    NewSpacePage* next_page = current_page_->next_page();
+    Page* next_page = current_page_->next_page();
     if (next_page == anchor()) return false;
     current_page_ = next_page;
     return true;
@@ -2430,6 +2455,8 @@
   // Resets the space to using the first page.
   void Reset();
 
+  bool ReplaceWithEmptyPage(Page* page);
+
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
   void set_age_mark(Address mark);
@@ -2479,9 +2506,9 @@
 #endif
 
  private:
-  void RewindPages(NewSpacePage* start, int num_pages);
+  void RewindPages(Page* start, int num_pages);
 
-  inline NewSpacePage* anchor() { return &anchor_; }
+  inline Page* anchor() { return &anchor_; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2501,8 +2528,8 @@
   bool committed_;
   SemiSpaceId id_;
 
-  NewSpacePage anchor_;
-  NewSpacePage* current_page_;
+  Page anchor_;
+  Page* current_page_;
 
   friend class SemiSpaceIterator;
   friend class NewSpacePageIterator;
@@ -2550,15 +2577,15 @@
   inline NewSpacePageIterator(Address start, Address limit);
 
   inline bool has_next();
-  inline NewSpacePage* next();
+  inline Page* next();
 
  private:
-  NewSpacePage* prev_page_;  // Previous page returned.
+  Page* prev_page_;  // Previous page returned.
   // Next page that will be returned.  Cached here so that we can use this
   // iterator for operations that deallocate pages.
-  NewSpacePage* next_page_;
+  Page* next_page_;
   // Last page returned.
-  NewSpacePage* last_page_;
+  Page* last_page_;
 };
 
 
@@ -2607,7 +2634,7 @@
 
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * NewSpacePage::kAllocatableMemory +
+    return pages_used_ * Page::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }
 
@@ -2620,7 +2647,7 @@
   intptr_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           NewSpacePage::kAllocatableMemory;
+           Page::kAllocatableMemory;
   }
 
   // Return the current size of a semispace, allocatable and non-allocatable
@@ -2650,9 +2677,9 @@
   size_t AllocatedSinceLastGC() {
     bool seen_age_mark = false;
     Address age_mark = to_space_.age_mark();
-    NewSpacePage* current_page = to_space_.first_page();
-    NewSpacePage* age_mark_page = NewSpacePage::FromAddress(age_mark);
-    NewSpacePage* last_page = NewSpacePage::FromAddress(top() - kPointerSize);
+    Page* current_page = to_space_.first_page();
+    Page* age_mark_page = Page::FromAddress(age_mark);
+    Page* last_page = Page::FromAddress(top() - kPointerSize);
     if (age_mark_page == last_page) {
       if (top() - age_mark >= 0) {
         return top() - age_mark;
@@ -2675,7 +2702,7 @@
     DCHECK_EQ(current_page, age_mark_page);
     current_page = age_mark_page->next_page();
     while (current_page != last_page) {
-      allocated += NewSpacePage::kAllocatableMemory;
+      allocated += Page::kAllocatableMemory;
       current_page = current_page->next_page();
     }
     allocated += top() - current_page->area_start();
@@ -2684,6 +2711,12 @@
     return static_cast<size_t>(allocated);
   }
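
The AllocatedSinceLastGC walk above sums a partial first page, whole intermediate pages, and a partial last page. A toy model of that accounting (the sizes and the Page struct are assumptions for the example, not V8's):

#include <cassert>
#include <cstddef>

constexpr size_t kAllocatable = 16;  // toy per-page allocatable byte count

struct Page {
  Page* next;
  size_t area_start;  // toy byte "addresses"
};

// Bytes allocated between age_mark (on mark_page) and top (on last_page).
size_t AllocatedSince(const Page* mark_page, const Page* last_page,
                      size_t age_mark, size_t top, size_t mark_page_end) {
  if (mark_page == last_page) return top - age_mark;
  size_t allocated = mark_page_end - age_mark;       // rest of the mark page
  for (const Page* p = mark_page->next; p != last_page; p = p->next)
    allocated += kAllocatable;                       // full intermediate pages
  return allocated + (top - last_page->area_start);  // partial last page
}

int main() {
  Page last = {nullptr, 32};
  Page mid = {&last, 16};
  Page mark = {&mid, 0};
  // 12 bytes left on the mark page + one full 16-byte page + 8 bytes = 36.
  assert(AllocatedSince(&mark, &last, 4, 40, 16) == 36);
}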
 
+  bool ReplaceWithEmptyPage(Page* page) {
+    // This method is called after flipping the semispace.
+    DCHECK(page->InFromSpace());
+    return from_space_.ReplaceWithEmptyPage(page);
+  }
+
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
diff --git a/src/i18n.cc b/src/i18n.cc
index 623de50..b64fc17 100644
--- a/src/i18n.cc
+++ b/src/i18n.cc
@@ -768,28 +768,12 @@
   return NULL;
 }
 
-
-template<class T>
-void DeleteNativeObjectAt(const v8::WeakCallbackData<v8::Value, void>& data,
-                          int index) {
-  v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(data.GetValue());
-  delete reinterpret_cast<T*>(obj->GetAlignedPointerFromInternalField(index));
-}
-
-
-static void DestroyGlobalHandle(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
+void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
+  delete reinterpret_cast<icu::SimpleDateFormat*>(data.GetInternalField(0));
   GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
 }
 
 
-void DateFormat::DeleteDateFormat(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
-  DeleteNativeObjectAt<icu::SimpleDateFormat>(data, 0);
-  DestroyGlobalHandle(data);
-}
-
-
 icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
     Isolate* isolate,
     Handle<String> locale,
@@ -847,11 +831,9 @@
   return NULL;
 }
 
-
-void NumberFormat::DeleteNumberFormat(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
-  DeleteNativeObjectAt<icu::DecimalFormat>(data, 0);
-  DestroyGlobalHandle(data);
+void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
+  delete reinterpret_cast<icu::DecimalFormat*>(data.GetInternalField(0));
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
 }
 
 
@@ -908,11 +890,9 @@
   return NULL;
 }
 
-
-void Collator::DeleteCollator(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
-  DeleteNativeObjectAt<icu::Collator>(data, 0);
-  DestroyGlobalHandle(data);
+void Collator::DeleteCollator(const v8::WeakCallbackInfo<void>& data) {
+  delete reinterpret_cast<icu::Collator*>(data.GetInternalField(0));
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
 }
 
 
@@ -973,12 +953,11 @@
   return NULL;
 }
 
-
 void BreakIterator::DeleteBreakIterator(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
-  DeleteNativeObjectAt<icu::BreakIterator>(data, 0);
-  DeleteNativeObjectAt<icu::UnicodeString>(data, 1);
-  DestroyGlobalHandle(data);
+    const v8::WeakCallbackInfo<void>& data) {
+  delete reinterpret_cast<icu::BreakIterator*>(data.GetInternalField(0));
+  delete reinterpret_cast<icu::UnicodeString*>(data.GetInternalField(1));
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
 }
 
 }  // namespace internal
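
All four Delete* callbacks above now follow one pattern: free the native object stored in an internal field, then destroy the global handle passed as the parameter. A hedged embedder-side sketch of that pattern (NativeThing is a stand-in; V8 itself calls GlobalHandles::Destroy rather than Persistent::Reset):

#include <v8.h>

struct NativeThing {};  // stand-in for the ICU objects above

void DeleteNativeThing(const v8::WeakCallbackInfo<void>& data) {
  // Internal field 0 holds the native pointer, as in DeleteDateFormat above.
  delete static_cast<NativeThing*>(data.GetInternalField(0));
  static_cast<v8::Persistent<v8::Object>*>(data.GetParameter())->Reset();
}

void MakeWeak(v8::Persistent<v8::Object>* handle) {
  // kInternalFields exposes the wrapper's first internal fields to the
  // callback via GetInternalField().
  handle->SetWeak<void>(static_cast<void*>(handle), DeleteNativeThing,
                        v8::WeakCallbackType::kInternalFields);
}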
diff --git a/src/i18n.h b/src/i18n.h
index a8db4d1..2a4c208 100644
--- a/src/i18n.h
+++ b/src/i18n.h
@@ -51,8 +51,7 @@
 
   // Release memory we allocated for the DateFormat once the JS object that
   // holds the pointer gets garbage collected.
-  static void DeleteDateFormat(
-      const v8::WeakCallbackData<v8::Value, void>& data);
+  static void DeleteDateFormat(const v8::WeakCallbackInfo<void>& data);
 
  private:
   DateFormat();
@@ -75,8 +74,7 @@
 
   // Release memory we allocated for the NumberFormat once the JS object that
   // holds the pointer gets garbage collected.
-  static void DeleteNumberFormat(
-      const v8::WeakCallbackData<v8::Value, void>& data);
+  static void DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data);
 
  private:
   NumberFormat();
@@ -98,8 +96,7 @@
 
   // Release memory we allocated for the Collator once the JS object that holds
   // the pointer gets garbage collected.
-  static void DeleteCollator(
-      const v8::WeakCallbackData<v8::Value, void>& data);
+  static void DeleteCollator(const v8::WeakCallbackInfo<void>& data);
 
  private:
   Collator();
@@ -121,8 +118,7 @@
 
   // Release memory we allocated for the BreakIterator once the JS object that
   // holds the pointer gets garbage collected.
-  static void DeleteBreakIterator(
-      const v8::WeakCallbackData<v8::Value, void>& data);
+  static void DeleteBreakIterator(const v8::WeakCallbackInfo<void>& data);
 
  private:
   BreakIterator();
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index cafa676..2120a90 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -81,11 +81,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Memory::Address_at(pc_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
@@ -119,21 +114,6 @@
   }
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Memory::Address_at(pc_) = updated_reference;
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
-  }
-}
-
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_at(pc_);
@@ -283,7 +263,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 150131c..fdf11c1 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -186,6 +186,42 @@
   return false;
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_reference &&
+           updated_reference < new_base + new_size);
+    Memory::Address_at(pc_) = updated_reference;
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Memory::uint32_at(pc_) = updated_size_reference;
+  } else {
+    UNREACHABLE();
+  }
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+  }
+}
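
The relocation arithmetic above rebases an embedded pointer from the old wasm memory region onto the new one, with DCHECKs bounding it on both sides. A standalone sketch of just that arithmetic:

#include <cassert>
#include <cstdint>

uintptr_t Rebase(uintptr_t ref, uintptr_t old_base, uint32_t old_size,
                 uintptr_t new_base, uint32_t new_size) {
  assert(old_base <= ref && ref < old_base + old_size);
  uintptr_t updated = new_base + (ref - old_base);  // keep the same offset
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}

int main() {
  // A reference 0x40 bytes into the old region moves with the region.
  assert(Rebase(0x1040, 0x1000, 0x100, 0x8000, 0x100) == 0x8040);
}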
 
 // -----------------------------------------------------------------------------
 // Implementation of Operand
@@ -681,6 +717,18 @@
   emit_operand(dst, src);
 }
 
+void Assembler::xchg_b(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x86);
+  emit_operand(reg, op);
+}
+
+void Assembler::xchg_w(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x87);
+  emit_operand(reg, op);
+}
 
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
@@ -787,14 +835,14 @@
 void Assembler::cmpw(Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
   EMIT(0x66);
-  EMIT(0x39);
+  EMIT(0x3B);
   emit_operand(reg, op);
 }
 
 void Assembler::cmpw(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
   EMIT(0x66);
-  EMIT(0x3B);
+  EMIT(0x39);
   emit_operand(reg, op);
 }
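
The two hunks above swap opcodes that had been reversed: 0x3B is CMP r16, r/m16 (register on the left), while 0x39 is CMP r/m16, r16 (register on the right), each behind the 0x66 operand-size prefix. A toy encoder for the register-direct case, assuming plain integer register codes:

#include <cstdint>
#include <vector>

// ModRM byte with mod = 11 (register-direct): | mod | reg | r/m |.
uint8_t ModRM(int reg, int rm) {
  return static_cast<uint8_t>(0xC0 | (reg << 3) | rm);
}

// cmpw reg, r/m16  ->  66 3B /r
std::vector<uint8_t> CmpwRegRm(int reg, int rm) {
  return {0x66, 0x3B, ModRM(reg, rm)};
}

// cmpw r/m16, reg  ->  66 39 /r
std::vector<uint8_t> CmpwRmReg(int rm, int reg) {
  return {0x66, 0x39, ModRM(reg, rm)};
}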
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 5105ff5..c3edacb 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -74,6 +74,9 @@
   V(xmm6)                   \
   V(xmm7)
 
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD_REGISTERS DOUBLE_REGISTERS
+
 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
   V(xmm1)                               \
   V(xmm2)                               \
@@ -146,8 +149,7 @@
 #undef DECLARE_REGISTER
 const Register no_reg = {Register::kCode_no_reg};
 
-
-struct DoubleRegister {
+struct XMMRegister {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
     DOUBLE_REGISTERS(REGISTER_CODE)
@@ -158,8 +160,8 @@
 
   static const int kMaxNumRegisters = Code::kAfterLast;
 
-  static DoubleRegister from_code(int code) {
-    DoubleRegister result = {code};
+  static XMMRegister from_code(int code) {
+    XMMRegister result = {code};
     return result;
   }
 
@@ -171,23 +173,25 @@
     return reg_code;
   }
 
-  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+  bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
 
   const char* ToString();
 
   int reg_code;
 };
 
+typedef XMMRegister FloatRegister;
+
+typedef XMMRegister DoubleRegister;
+
+typedef XMMRegister Simd128Register;
+
 #define DECLARE_REGISTER(R) \
   const DoubleRegister R = {DoubleRegister::kCode_##R};
 DOUBLE_REGISTERS(DECLARE_REGISTER)
 #undef DECLARE_REGISTER
 const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
 
-typedef DoubleRegister Simd128Register;
-
-typedef DoubleRegister XMMRegister;
-
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -655,6 +659,8 @@
   // Exchange
   void xchg(Register dst, Register src);
   void xchg(Register dst, const Operand& src);
+  void xchg_b(Register reg, const Operand& op);
+  void xchg_w(Register reg, const Operand& op);
 
   // Arithmetics
   void adc(Register dst, int32_t imm32);
@@ -1432,7 +1438,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index b7e33d9..232c56b 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -186,16 +186,9 @@
     __ j(greater_equal, &loop);
 
     // Call the function.
-    if (is_api_function) {
-      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(eax);
-      __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -391,6 +384,141 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : the value to pass to the generator
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(ebx);
+
+  // Store input value into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), eax);
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, eax, ecx,
+                      kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
+
+  // Load suspended function and context.
+  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
+  __ j(equal, &skip_flooding);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Pop return address.
+  __ PopReturnAddressTo(eax);
+
+  // Push receiver.
+  __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+  // ----------- S t a t e -------------
+  //  -- eax    : return address
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- edi    : generator function
+  //  -- esi    : generator context
+  //  -- esp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ sub(ecx, Immediate(Smi::FromInt(1)));
+    __ j(carry, &done_loop, Label::kNear);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+  __ j(not_equal, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ PushReturnAddressFrom(eax);
+    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ mov(edx, ebx);
+    __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushReturnAddressFrom(eax);  // Return address.
+    __ Push(ebp);                   // Caller's frame pointer.
+    __ Move(ebp, esp);
+    __ Push(esi);  // Callee's context.
+    __ Push(edi);  // Callee's JS Function.
+
+    // Restore the operand stack.
+    __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+    {
+      Label done_loop, loop;
+      __ Move(ecx, Smi::FromInt(0));
+      __ bind(&loop);
+      __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+      __ j(equal, &done_loop, Label::kNear);
+      __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+      __ add(ecx, Immediate(Smi::FromInt(1)));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
+           Immediate(masm->isolate()->factory()->empty_fixed_array()));
+
+    // Resume the generator function at the continuation.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(ecx);
+    __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ mov(eax, ebx);  // Continuation expects generator object in eax.
+    __ jmp(edx);
+  }
+}
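
The trampoline above leans on the ia32 Smi representation: a 31-bit payload shifted left by one with tag bit 0, so tagged counts can be decremented with plain integer subtraction and the carry flag. A toy illustration (assuming the 32-bit layout only):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;

intptr_t SmiFromInt(int value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}
int SmiToInt(intptr_t smi) { return static_cast<int>(smi >> kSmiTagSize); }

int main() {
  assert(SmiToInt(SmiFromInt(42)) == 42);
  // Subtracting Smi::FromInt(1) is a raw subtraction of 2; the hole-pushing
  // loop above watches the carry flag for underflow past zero.
  assert(SmiFromInt(1) == 2);
}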
 
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
@@ -407,6 +535,8 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
@@ -417,10 +547,9 @@
   __ push(edi);  // Callee's JS function.
   __ push(edx);  // Callee's new target.
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into edi (InterpreterBytecodeRegister).
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-
   Label load_debug_bytecode_array, bytecode_array_loaded;
   __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
          Immediate(DebugInfo::uninitialized()));
@@ -429,8 +558,12 @@
          FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ CompareRoot(kInterpreterBytecodeArrayRegister,
+                 Heap::kUndefinedValueRootIndex);
+  __ j(equal, &bytecode_array_not_present);
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
                      eax);
@@ -439,8 +572,8 @@
 
   // Push bytecode array.
   __ push(kInterpreterBytecodeArrayRegister);
-  // Push zero for bytecode array offset.
-  __ push(Immediate(0));
+  // Push Smi tagged initial bytecode array offset.
+  __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
 
   // Allocate the local and temporary register file on the stack.
   {
@@ -473,41 +606,36 @@
     __ j(greater_equal, &loop_header);
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load accumulator, register file, bytecode offset, dispatch table into
-  // registers.
+  // Load accumulator, bytecode offset and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ mov(kInterpreterRegisterFileRegister, ebp);
-  __ add(kInterpreterRegisterFileRegister,
-         Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ mov(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
-                  masm->isolate())));
-
-  // Push dispatch table as a stack located parameter to the bytecode handler.
-  DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
-  __ push(ebx);
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Dispatch to the first bytecode handler for the function.
-  __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-  __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
-  // Restore undefined_value in accumulator (eax)
-  // TODO(rmcilroy): Remove this once we move the dispatch table back into a
-  // register.
-  __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
   __ call(ebx);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+  // The return value is in eax.
+
+  // Get the arguments + receiver count.
+  __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(ebx, FieldOperand(ebx, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments and return.
+  __ pop(ecx);
+  __ add(esp, ebx);
+  __ push(ecx);
+  __ ret(0);
 
   // Load debug copy of the bytecode array.
   __ bind(&load_debug_bytecode_array);
@@ -516,31 +644,23 @@
   __ mov(kInterpreterBytecodeArrayRegister,
          FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ jmp(&bytecode_array_loaded);
+
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
+  __ pop(edx);  // Callee's new target.
+  __ pop(edi);  // Callee's JS function.
+  __ pop(esi);  // Callee's context.
+  __ leave();   // Leave the frame so we can tail call.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
+  __ RecordWriteCodeEntryField(edi, ecx, ebx);
+  __ jmp(ecx);
 }
 
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in rax.
-
-  // Leave the frame (also dropping the register file).
-  __ leave();
-
-  // Drop receiver + arguments and return.
-  __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
-                           BytecodeArray::kParameterSizeOffset));
-  __ pop(ecx);
-  __ add(esp, ebx);
-  __ push(ecx);
-  __ ret(0);
-}
-
-
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                          Register array_limit) {
   // ----------- S t a t e -------------
@@ -558,7 +678,6 @@
   __ j(greater, &loop_header, Label::kNear);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -627,17 +746,26 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ LoadHeapObject(ebx,
+                    masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
+                        Code::kHeaderSize - kHeapObjectTag));
+  __ push(ebx);
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register.
-  __ mov(kInterpreterRegisterFileRegister, ebp);
-  __ add(kInterpreterRegisterFileRegister,
-         Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Initialize the dispatch table register.
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Get the bytecode array pointer from the frame.
   __ mov(kInterpreterBytecodeArrayRegister,
-         Operand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -648,92 +776,173 @@
   }
 
   // Get the target bytecode offset from the frame.
-  __ mov(
-      kInterpreterBytecodeOffsetRegister,
-      Operand(kInterpreterRegisterFileRegister,
-              InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Push dispatch table as a stack located parameter to the bytecode handler.
-  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
-                  masm->isolate())));
-  DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
-  __ Pop(esi);
-  __ Push(ebx);
-  __ Push(esi);
-
   // Dispatch to the target bytecode.
-  __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-  __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
-
-  // Get the context from the frame.
-  __ mov(kContextRegister,
-         Operand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kContextFromRegisterPointer));
-
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
   __ jmp(ebx);
 }
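
Both dispatch paths above boil down to: load the byte at the current bytecode offset, index the dispatch table with it, and jump. A minimal model of that mechanism (handler names and table contents are illustrative):

#include <cstdint>
#include <cstdio>

using Handler = void (*)();

void HandleLdaZero() { std::puts("LdaZero"); }
void HandleReturn() { std::puts("Return"); }

// 256-entry table indexed by the bytecode byte; unused slots stay null.
Handler dispatch_table[256] = {HandleLdaZero, HandleReturn};

void DispatchAt(const uint8_t* bytecode, int offset) {
  dispatch_table[bytecode[offset]]();  // the movzx_b + table load + jmp above
}

int main() {
  const uint8_t bytecode[] = {0, 1};
  DispatchAt(bytecode, 0);
  DispatchAt(bytecode, 1);
}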
 
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ Push(Smi::FromInt(static_cast<int>(type)));
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and and pop the
-  // accumulator value into the accumulator register and push PC at top
-  // of stack (to simulate initial call to bytecode handler in interpreter entry
-  // trampoline).
-  __ Pop(ebx);
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-  __ Push(ebx);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = edi;
+  Register new_target = edx;
+  Register argument_count = eax;
+
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = argument_count;
+  Register index = ebx;
+  __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
+  __ cmp(index, Immediate(Smi::FromInt(2)));
+  __ j(less, &gotta_call_runtime);
+
+  // Find literals.
+  // edx : native context
+  // ebx : length / index
+  // eax : optimized code map
+  // stack[0] : new target
+  // stack[4] : closure
+  Register native_context = edx;
+  __ mov(native_context, NativeContextOperand());
+
+  __ bind(&loop_top);
+  Register temp = edi;
+
+  // Does the native context match?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousContext));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ j(not_equal, &loop_bottom);
+  // OSR id set to none?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
+  __ j(not_equal, &loop_bottom);
+  // Literals available?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ mov(ecx, Operand(esp, 0));
+  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
+  __ push(index);
+  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = ecx;
+  __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, eax);
+
+  // Link the closure into the optimized function list.
+  // ecx : code entry
+  // edx : native context
+  // edi : closure
+  __ mov(ebx,
+         ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+         closure);
+  // Save closure before the write barrier.
+  __ mov(ebx, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
+                            kDontSaveFPRegs);
+  __ mov(closure, ebx);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ jmp(entry);
+
+  __ bind(&loop_bottom);
+  __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(index, Immediate(Smi::FromInt(1)));
+  __ j(greater, &loop_top);
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context free optimized code map entry.
+  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
+                                      SharedFunctionInfo::kSharedCodeIndex));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
+  __ and_(ebx, Code::KindField::kMask);
+  __ shr(ebx, Code::KindField::kShift);
+  __ cmp(ebx, Immediate(Code::BUILTIN));
+  __ j(equal, &gotta_call_runtime_no_stack);
+  // Yes, install the full code.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, ebx);
+  __ jmp(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
+
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
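
Generate_CompileLazy above is a hand-written search over the SharedFunctionInfo's optimized code map: match the native context and an OSR id of None, check that the literals and code weak cells are still alive, and fall back to the runtime otherwise. A high-level model of that probe (field names are illustrative):

#include <vector>

struct Entry {
  const void* context;   // weak native-context key
  int osr_id;            // must equal BailoutId::None() to be reusable
  const void* literals;  // null once the weak cell is cleared
  const void* code;      // null once the weak cell is cleared
};

const Entry* FindCachedCode(const std::vector<Entry>& map,
                            const void* native_context, int bailout_none) {
  // Scan back to front, like the loop_top/loop_bottom walk above.
  for (auto it = map.rbegin(); it != map.rend(); ++it) {
    if (it->context == native_context && it->osr_id == bailout_none &&
        it->literals != nullptr && it->code != nullptr) {
      return &*it;
    }
  }
  return nullptr;  // caller tail-calls Runtime::kCompileLazy
}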
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
@@ -875,13 +1084,14 @@
 
   // Switch on the state.
   Label not_no_registers, not_tos_eax;
-  __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
   __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
   __ mov(eax, Operand(esp, 2 * kPointerSize));
-  __ cmp(ecx, FullCodeGenerator::TOS_REG);
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
   __ j(not_equal, &not_tos_eax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, eax.
 
@@ -957,29 +1167,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : argc
-  //  -- esp[0] : return address
-  //  -- esp[4] : first argument (left-hand side)
-  //  -- esp[8] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ mov(InstanceOfDescriptor::LeftRegister(),
-           Operand(ebp, 2 * kPointerSize));  // Load left-hand side.
-    __ mov(InstanceOfDescriptor::RightRegister(),
-           Operand(ebp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ ret(2 * kPointerSize);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax     : argc
@@ -2359,6 +2546,34 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
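
Both allocate builtins smi-tag the untagged size from edx before handing it to the runtime, and load a smi zero into esi, apparently as a no-context sentinel (esi doubles as the context register on ia32). Smi tagging on a 32-bit target is a single left shift; a sketch:

  // A smi stores a 31-bit payload above a zero low bit.
  static inline int32_t SmiTagValue(int32_t value) {
    // Caller must guarantee -2^30 <= value < 2^30.
    return value << 1;
  }
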
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 53b35a3..b711ce9 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -62,12 +62,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -80,11 +74,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -2057,129 +2046,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = edx;                       // Object (lhs).
-  Register const function = eax;                     // Function (rhs).
-  Register const object_map = ecx;                   // Map of {object}.
-  Register const function_map = ebx;                 // Map of {function}.
-  Register const function_prototype = function_map;  // Prototype of {function}.
-  Register const scratch = edi;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
-  __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ j(not_equal, &fast_case, Label::kNear);
-  __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
-  __ j(not_equal, &fast_case, Label::kNear);
-  __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
-  __ ret(0);
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
-  __ j(not_equal, &slow_case);
-  __ LoadRoot(eax, Heap::kFalseValueRootIndex);
-  __ ret(0);
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
-  __ j(not_equal, &slow_case);
-
-  // Go to the runtime if the function is not a constructor.
-  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsConstructor));
-  __ j(zero, &slow_case);
-
-  // Ensure that {function} has an instance prototype.
-  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
-            Immediate(1 << Map::kHasNonInstancePrototype));
-  __ j(not_zero, &slow_case);
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ mov(function_prototype,
-         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  Register const function_prototype_map = scratch;
-  __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
-  __ j(not_equal, &function_prototype_valid, Label::kNear);
-  __ mov(function_prototype,
-         FieldOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Label done, loop, fast_runtime_fallback;
-  __ mov(eax, isolate()->factory()->true_value());
-  __ bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &fast_runtime_fallback, Label::kNear);
-  // Check if the current object is a Proxy.
-  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
-  __ j(equal, &fast_runtime_fallback, Label::kNear);
-
-  __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object, function_prototype);
-  __ j(equal, &done, Label::kNear);
-  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
-  __ cmp(object, isolate()->factory()->null_value());
-  __ j(not_equal, &loop);
-  __ mov(eax, isolate()->factory()->false_value());
-
-  __ bind(&done);
-  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
-  __ ret(0);
-
-  // Found Proxy or access check needed: Call the runtime.
-  __ bind(&fast_runtime_fallback);
-  __ PopReturnAddressTo(scratch);
-  __ Push(object);
-  __ Push(function_prototype);
-  __ PushReturnAddressFrom(scratch);
-  // Invalidate the instanceof cache.
-  __ Move(eax, Immediate(Smi::FromInt(0)));
-  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ PopReturnAddressTo(scratch);
-  __ Push(object);
-  __ Push(function);
-  __ PushReturnAddressFrom(scratch);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
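
With InstanceOfStub deleted (together with Generate_FunctionHasInstance above), instanceof on this port is resolved through Runtime::kOrdinaryHasInstance and Runtime::kHasInPrototypeChain. The prototype walk the stub used to inline, as a hedged sketch with the instanceof cache, proxy and access-check bailouts elided:

  // Sketch of the semantics only, not the replacement implementation.
  bool HasInPrototypeChain(JSReceiver* object, Object* prototype) {
    for (Object* current = object->map()->prototype(); !current->IsNull();
         current = HeapObject::cast(current)->map()->prototype()) {
      if (current == prototype) return true;
    }
    return false;
  }
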
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator
 
@@ -3842,8 +3708,8 @@
   __ j(not_equal, &miss);
   __ push(slot);
   __ push(vector);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, vector, scratch);
   __ pop(vector);
@@ -4103,8 +3969,8 @@
   __ pop(value);
   __ push(slot);
   __ push(vector);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
                                                receiver, key, slot, no_reg);
   __ pop(vector);
@@ -4714,16 +4580,16 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ mov(Operand(eax, JSObject::kMapOffset), ecx);
-  __ mov(Operand(eax, JSObject::kPropertiesOffset),
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
          masm->isolate()->factory()->empty_fixed_array());
-  __ mov(Operand(eax, JSObject::kElementsOffset),
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
          masm->isolate()->factory()->empty_fixed_array());
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+  __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
 
   // ----------- S t a t e -------------
-  //  -- eax    : result (untagged)
+  //  -- eax    : result (tagged)
   //  -- ebx    : result fields (untagged)
   //  -- edi    : result end (untagged)
   //  -- ecx    : initial map
@@ -4741,10 +4607,6 @@
     // Initialize all in-object fields with undefined.
     __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
     __ InitializeFieldsWithFiller(ebx, edi, edx);
-
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ inc(eax);
     __ Ret();
   }
   __ bind(&slack_tracking);
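
The Operand -> FieldOperand switch above is the direct consequence of Allocate now handing back a tagged pointer: FieldOperand folds the heap-object tag into the displacement, which is why the explicit `inc(eax)` tagging steps below can be deleted. In ia32 macro-assembler terms:

  // kHeapObjectTag == 1, so for a tagged object pointer:
  //   FieldOperand(obj, offset) == Operand(obj, offset - kHeapObjectTag)
  // i.e. the same byte address the old untagged-pointer stores produced.
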
@@ -4767,10 +4629,6 @@
     __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
     __ InitializeFieldsWithFiller(ebx, edx, edi);
 
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ inc(eax);
-
     // Check if we can finalize the instance size.
     Label finalize;
     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4801,10 +4659,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(ecx);
   }
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ dec(eax);
   __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
   __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ dec(edi);
   __ jmp(&done_allocate);
 
   // Fall back to %NewObject.
@@ -4826,19 +4684,19 @@
   // -----------------------------------
   __ AssertFunction(edi);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make edx point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mov(edx, ebp);
-    __ jmp(&loop_entry, Label::kNear);
-    __ bind(&loop);
+  // Make edx point to the JavaScript frame.
+  __ mov(edx, ebp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
-    __ j(not_equal, &loop);
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
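
The unbounded frame-walking loop is replaced by a static decision: a stub compiled with skip_stub_frame() hops over exactly one handler/stub frame, everything else reads its own frame directly, and the debug-only Abort guards the invariant. In pseudocode:

  // fp = ebp;
  // if (skip_stub_frame())
  //   fp = Memory[fp + StandardFrameConstants::kCallerFPOffset];
  // DCHECK(Memory[fp + StandardFrameConstants::kFunctionOffset] == edi);
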
 
   // Check if we have rest parameters (only possible if we have an
@@ -4868,7 +4726,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Set up the rest parameter array in eax.
@@ -4910,7 +4768,7 @@
     Label allocate, done_allocate;
     __ lea(ecx, Operand(eax, times_half_pointer_size,
                         JSArray::kSize + FixedArray::kHeaderSize));
-    __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+    __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Set up the elements array in edx.
@@ -4973,35 +4831,50 @@
   // -----------------------------------
   __ AssertFunction(edi);
 
+  // Make ecx point to the JavaScript frame.
+  __ mov(ecx, ebp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Clean up to match the FastNewStrictArgumentsStub.
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ecx,
-         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx,
+         FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));
 
-  // ecx : number of parameters (tagged)
+  // ebx : number of parameters (tagged)
   // edx : parameters pointer
   // edi : function
+  // ecx : JavaScript frame pointer.
   // esp[0] : return address
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // No adaptor, parameter count = argument count.
-  __ mov(ebx, ecx);
-  __ push(ecx);
+  __ mov(ecx, ebx);
+  __ push(ebx);
   __ jmp(&try_allocate, Label::kNear);
 
   // We have an adaptor frame. Patch the parameters pointer.
   __ bind(&adaptor_frame);
-  __ mov(ebx, ecx);
-  __ push(ecx);
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ push(ebx);
+  __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ lea(edx, Operand(edx, ecx, times_2,
                       StandardFrameConstants::kCallerSPOffset));
@@ -5035,7 +4908,7 @@
   __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+  __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
 
   // eax = address of new object(s) (tagged)
   // ecx = argument count (smi-tagged)
@@ -5214,19 +5087,19 @@
   // -----------------------------------
   __ AssertFunction(edi);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make edx point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mov(edx, ebp);
-    __ jmp(&loop_entry, Label::kNear);
-    __ bind(&loop);
+  // Make edx point to the JavaScript frame.
+  __ mov(edx, ebp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
-    __ j(not_equal, &loop);
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -5265,7 +5138,7 @@
   __ lea(ecx,
          Operand(eax, times_half_pointer_size,
                  JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+  __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Set up the elements array in edx.
@@ -5686,9 +5559,14 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
 
   __ pop(return_address);
+
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+
   // context save.
   __ push(context);
 
@@ -5733,7 +5611,7 @@
 
   // Allocate the v8::Arguments structure in the arguments' space, since
   // it's not controlled by the GC.
-  const int kApiStackSpace = 4;
+  const int kApiStackSpace = 3;
 
   PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
 
@@ -5744,8 +5622,6 @@
   __ mov(ApiParameterOperand(3), scratch);
   // FunctionCallbackInfo::length_.
   __ Move(ApiParameterOperand(4), Immediate(argc()));
-  // FunctionCallbackInfo::is_construct_call_.
-  __ Move(ApiParameterOperand(5), Immediate(0));
 
   // v8::InvocationCallback's argument.
   __ lea(scratch, ApiParameterOperand(2));
@@ -5765,8 +5641,8 @@
   }
   Operand return_value_operand(ebp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  Operand is_construct_call_operand = ApiParameterOperand(5);
-  Operand* stack_space_operand = &is_construct_call_operand;
+  Operand length_operand = ApiParameterOperand(4);
+  Operand* stack_space_operand = &length_operand;
   stack_space = argc() + FCA::kArgsLength + 1;
   stack_space_operand = nullptr;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
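
The implicit-argument block passed to API callbacks gains a new.target slot (kArgsLength 7 -> 8) and drops the separate is_construct_call_ word, which is why kApiStackSpace shrinks from 4 to 3 and the length slot now stands in as the candidate stack-space operand (this stub then overrides it with a constant stack_space anyway). The resulting layout, lowest index first; indices 3-6 are inferred from the push sequence, while 0-2 and 7 are asserted above:

  // [0] holder        [3] return value    [6] context save
  // [1] isolate       [4] data            [7] new.target (undefined here)
  // [2] rv default    [5] callee
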
@@ -5777,14 +5653,34 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- esp[0]                        : return address
-  //  -- esp[4]                        : name
-  //  -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- edx                           : api_function_address
-  // -----------------------------------
-  DCHECK(edx.is(ApiGetterDescriptor::function_address()));
+  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame to make the GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = ebx;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+  __ pop(scratch);  // Pop return address to extend the frame.
+  __ push(receiver);
+  __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
+  __ PushRoot(Heap::kUndefinedValueRootIndex);  // ReturnValue
+  // ReturnValue default value
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+  __ push(Immediate(ExternalReference::isolate_address(isolate())));
+  __ push(holder);
+  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
+  __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
+  __ push(scratch);  // Restore return address.
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5794,9 +5690,6 @@
   // active) in non-GCed stack space.
   const int kApiArgc = 3 + 1;
 
-  Register api_function_address = edx;
-  Register scratch = ebx;
-
   // Load address of v8::PropertyAccessorInfo::args_ array.
   __ lea(scratch, Operand(esp, 2 * kPointerSize));
 
@@ -5806,25 +5699,30 @@
   Operand info_object = ApiParameterOperand(3);
   __ mov(info_object, scratch);
 
+  // Name as handle.
   __ sub(scratch, Immediate(kPointerSize));
-  __ mov(ApiParameterOperand(0), scratch);  // name.
+  __ mov(ApiParameterOperand(0), scratch);
+  // Arguments pointer.
   __ lea(scratch, info_object);
-  __ mov(ApiParameterOperand(1), scratch);  // arguments pointer.
+  __ mov(ApiParameterOperand(1), scratch);
   // Reserve space for optional callback address parameter.
   Operand thunk_last_arg = ApiParameterOperand(2);
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+  Register function_address = edx;
+  __ mov(function_address,
+         FieldOperand(scratch, Foreign::kForeignAddressOffset));
   // +3 is to skip prolog, return address and name handle.
   Operand return_value_operand(
       ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
-  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
-                           thunk_last_arg, kStackUnwindSpace, nullptr,
-                           return_value_operand, NULL);
+  CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
+                           kStackUnwindSpace, nullptr, return_value_operand,
+                           NULL);
 }
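
CallApiGetterStub no longer receives a raw function address in edx; it takes the receiver, holder and the AccessorInfo itself in registers, builds the PropertyCallbackInfo frame inline, and pulls the redirected getter out of the accessor's js_getter field through a Foreign wrapper. A hedged C++ equivalent of that last indirection (accessor names assumed):

  // Sketch: resolve the callable address from an AccessorInfo.
  Address GetterAddress(AccessorInfo* info) {
    Object* js_getter = info->js_getter();  // AccessorInfo::kJsGetterOffset
    return Foreign::cast(js_getter)->foreign_address();
  }
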
 
-
 #undef __
 
 }  // namespace internal
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 2190531..36c83cc 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -586,8 +586,7 @@
   // Allocate new FixedDoubleArray.
   // edx: receiver
   // edi: length of source FixedArray (smi-tagged)
-  AllocationFlags flags =
-      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
   __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
               REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
 
@@ -714,7 +713,7 @@
   // Allocate new FixedArray.
   // ebx: length of source FixedDoubleArray (smi-tagged)
   __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
-  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
 
   // eax: destination FixedArray
   // ebx: number of elements
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 3cd0ac6..8a1b3b5 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -8,6 +8,7 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/base/compiler-specific.h"
 #include "src/disasm.h"
 
 namespace disasm {
@@ -29,18 +30,19 @@
 };
 
 static const ByteMnemonic two_operands_instr[] = {
-    {0x01, "add", OPER_REG_OP_ORDER},   {0x03, "add", REG_OPER_OP_ORDER},
-    {0x09, "or", OPER_REG_OP_ORDER},    {0x0B, "or", REG_OPER_OP_ORDER},
-    {0x13, "adc", REG_OPER_OP_ORDER},   {0x1B, "sbb", REG_OPER_OP_ORDER},
-    {0x21, "and", OPER_REG_OP_ORDER},   {0x23, "and", REG_OPER_OP_ORDER},
-    {0x29, "sub", OPER_REG_OP_ORDER},   {0x2A, "subb", REG_OPER_OP_ORDER},
-    {0x2B, "sub", REG_OPER_OP_ORDER},   {0x31, "xor", OPER_REG_OP_ORDER},
-    {0x33, "xor", REG_OPER_OP_ORDER},   {0x38, "cmpb", OPER_REG_OP_ORDER},
-    {0x39, "cmp", OPER_REG_OP_ORDER},   {0x3A, "cmpb", REG_OPER_OP_ORDER},
-    {0x3B, "cmp", REG_OPER_OP_ORDER},   {0x84, "test_b", REG_OPER_OP_ORDER},
-    {0x85, "test", REG_OPER_OP_ORDER},  {0x87, "xchg", REG_OPER_OP_ORDER},
-    {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
-    {0x8D, "lea", REG_OPER_OP_ORDER},   {-1, "", UNSET_OP_ORDER}};
+    {0x01, "add", OPER_REG_OP_ORDER},  {0x03, "add", REG_OPER_OP_ORDER},
+    {0x09, "or", OPER_REG_OP_ORDER},   {0x0B, "or", REG_OPER_OP_ORDER},
+    {0x13, "adc", REG_OPER_OP_ORDER},  {0x1B, "sbb", REG_OPER_OP_ORDER},
+    {0x21, "and", OPER_REG_OP_ORDER},  {0x23, "and", REG_OPER_OP_ORDER},
+    {0x29, "sub", OPER_REG_OP_ORDER},  {0x2A, "subb", REG_OPER_OP_ORDER},
+    {0x2B, "sub", REG_OPER_OP_ORDER},  {0x31, "xor", OPER_REG_OP_ORDER},
+    {0x33, "xor", REG_OPER_OP_ORDER},  {0x38, "cmpb", OPER_REG_OP_ORDER},
+    {0x39, "cmp", OPER_REG_OP_ORDER},  {0x3A, "cmpb", REG_OPER_OP_ORDER},
+    {0x3B, "cmp", REG_OPER_OP_ORDER},  {0x84, "test_b", REG_OPER_OP_ORDER},
+    {0x85, "test", REG_OPER_OP_ORDER}, {0x86, "xchg_b", REG_OPER_OP_ORDER},
+    {0x87, "xchg", REG_OPER_OP_ORDER}, {0x8A, "mov_b", REG_OPER_OP_ORDER},
+    {0x8B, "mov", REG_OPER_OP_ORDER},  {0x8D, "lea", REG_OPER_OP_ORDER},
+    {-1, "", UNSET_OP_ORDER}};
 
 static const ByteMnemonic zero_operands_instr[] = {
   {0xC3, "ret", UNSET_OP_ORDER},
@@ -281,7 +283,7 @@
   bool vex_128() {
     DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
     byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
-    return (checked & 4) != 1;
+    return (checked & 4) == 0;
   }
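
The old predicate was a genuine bug: `checked & 4` evaluates to 0 or 4, never 1, so `!= 1` held unconditionally and vex_128() reported every VEX-encoded instruction as 128-bit. The fix tests the bit properly:

  // VEX.L lives in bit 2 of the inspected VEX byte; L == 0 selects
  // 128-bit vector length, so the predicate is (checked & 4) == 0.
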
 
   bool vex_none() {
@@ -389,8 +391,7 @@
   int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
   int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
   int AVXInstruction(byte* data);
-  void AppendToBuffer(const char* format, ...);
-
+  PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
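
PRINTF_FORMAT(2, 3) tells the compiler that parameter 2 is a printf-style format consumed by the varargs starting at parameter 3, enabling format/argument checking at every call site; it is also why the ZERO_OPERANDS_INSTR case below becomes AppendToBuffer("%s", idesc.mnem), since a non-literal format would now trip warnings such as -Wformat-security. On GCC/Clang the macro presumably expands along these lines (MSVC would get a no-op):

  #define PRINTF_FORMAT(format_param, dots_param) \
    __attribute__((format(printf, format_param, dots_param)))
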
 
   void UnimplementedInstruction() {
     if (abort_on_unimplemented_) {
@@ -1274,7 +1275,7 @@
     const InstructionDesc& idesc = instruction_table_->Get(*data);
     switch (idesc.type) {
       case ZERO_OPERANDS_INSTR:
-        AppendToBuffer(idesc.mnem);
+        AppendToBuffer("%s", idesc.mnem);
         data++;
         break;
 
@@ -1602,11 +1603,26 @@
         while (*data == 0x66) data++;
         if (*data == 0xf && data[1] == 0x1f) {
           AppendToBuffer("nop");  // 0x66 prefix
-        } else if (*data == 0x90) {
-          AppendToBuffer("nop");  // 0x66 prefix
-        } else if (*data == 0x8B) {
+        } else if (*data == 0x39) {
           data++;
-          data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+          data += PrintOperands("cmpw", OPER_REG_OP_ORDER, data);
+        } else if (*data == 0x3B) {
+          data++;
+          data += PrintOperands("cmpw", REG_OPER_OP_ORDER, data);
+        } else if (*data == 0x81) {
+          data++;
+          AppendToBuffer("cmpw ");
+          data += PrintRightOperand(data);
+          int imm = *reinterpret_cast<int16_t*>(data);
+          AppendToBuffer(",0x%x", imm);
+          data += 2;
+        } else if (*data == 0x87) {
+          data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          AppendToBuffer("xchg_w ");
+          data += PrintRightOperand(data);
+          AppendToBuffer(",%s", NameOfCPURegister(regop));
         } else if (*data == 0x89) {
           data++;
           int mod, regop, rm;
@@ -1614,6 +1630,11 @@
           AppendToBuffer("mov_w ");
           data += PrintRightOperand(data);
           AppendToBuffer(",%s", NameOfCPURegister(regop));
+        } else if (*data == 0x8B) {
+          data++;
+          data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+        } else if (*data == 0x90) {
+          AppendToBuffer("nop");  // 0x66 prefix
         } else if (*data == 0xC7) {
           data++;
           AppendToBuffer("%s ", "mov_w");
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index 2748f90..8a877b1 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -51,16 +51,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return edx; }
-const Register InstanceOfDescriptor::RightRegister() { return eax; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return edx; }
 const Register StringCompareDescriptor::RightRegister() { return eax; }
 
-
-const Register ApiGetterDescriptor::function_address() { return edx; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
 
 const Register MathPowTaggedDescriptor::exponent() { return eax; }
 
@@ -73,6 +68,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return eax; }
+const Register HasPropertyDescriptor::KeyRegister() { return ebx; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -252,13 +249,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // register state
+  // eax -- number of arguments
+  // edi -- function
+  // ebx -- allocation site with elements kind
+  Register registers[] = {edi, ebx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -322,6 +322,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -382,8 +387,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister };
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
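
The dispatch convention loses the dedicated register-file register; on ia32 the interpreter now threads exactly these four values, matching the constants revised in macro-assembler-ia32.h below:

  // eax  kInterpreterAccumulatorRegister
  // ecx  kInterpreterBytecodeOffsetRegister
  // edi  kInterpreterBytecodeArrayRegister
  // esi  kInterpreterDispatchTableRegister (shared with kContextRegister)
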
 
@@ -418,6 +423,16 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      eax,  // the value to pass to the generator
+      ebx,  // the JSGeneratorObject to resume
+      edx   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index f9fd8d6..08189e2 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1025,6 +1025,16 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+    Push(object);
+    CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+    Pop(object);
+    Check(equal, kOperandIsNotAGeneratorObject);
+  }
+}
 
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
@@ -1517,6 +1527,7 @@
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1558,26 +1569,23 @@
 
   // Calculate new top and bail out if space is exhausted.
   Register top_reg = result_end.is_valid() ? result_end : result;
+
   if (!top_reg.is(result)) {
     mov(top_reg, result);
   }
   add(top_reg, Immediate(object_size));
-  j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
-  // Update allocation top.
-  UpdateAllocationTopHelper(top_reg, scratch, flags);
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    UpdateAllocationTopHelper(top_reg, scratch, flags);
+  }
 
-  // Tag result if requested.
-  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if (tag_result) {
-      sub(result, Immediate(object_size - kHeapObjectTag));
-    } else {
-      sub(result, Immediate(object_size));
-    }
-  } else if (tag_result) {
+    sub(result, Immediate(object_size - kHeapObjectTag));
+  } else {
+    // Tag the result.
     DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
@@ -1594,6 +1602,8 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1645,18 +1655,16 @@
   } else {
     DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
   }
+
   lea(result_end, Operand(element_count, element_size, header_size));
   add(result_end, result);
-  j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
-  if ((flags & TAG_OBJECT) != 0) {
-    DCHECK(kHeapObjectTag == 1);
-    inc(result);
-  }
+  // Tag result.
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
 
-  // Update allocation top.
   UpdateAllocationTopHelper(result_end, scratch, flags);
 }
 
@@ -1668,6 +1676,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1711,18 +1720,65 @@
     mov(result_end, object_size);
   }
   add(result_end, result);
-  j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
-  // Tag result if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    DCHECK(kHeapObjectTag == 1);
-    inc(result);
+  // Tag result.
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
+
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    UpdateAllocationTopHelper(result_end, scratch, flags);
+  }
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register result_end, AllocationFlags flags) {
+  DCHECK(!result.is(result_end));
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, no_reg, flags);
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
   }
 
-  // Update allocation top.
-  UpdateAllocationTopHelper(result_end, scratch, flags);
+  lea(result_end, Operand(result, object_size));
+  UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, AllocationFlags flags) {
+  DCHECK(!result.is(result_end));
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, no_reg, flags);
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
+  lea(result_end, Operand(result, object_size, times_1, 0));
+  UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
 }
 
 
@@ -1733,7 +1789,7 @@
                                         MutableMode mode) {
   // Allocate heap number in new space.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   Handle<Map> map = mode == MUTABLE
       ? isolate()->factory()->mutable_heap_number_map()
@@ -1759,15 +1815,9 @@
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
-  Allocate(SeqTwoByteString::kHeaderSize,
-           times_1,
-           scratch1,
-           REGISTER_VALUE_IS_INT32,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
+           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1793,15 +1843,9 @@
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::kHeaderSize,
-           times_1,
-           scratch1,
-           REGISTER_VALUE_IS_INT32,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
+           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1821,7 +1865,7 @@
 
   // Allocate one-byte string in new space.
   Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
-           gc_required, TAG_OBJECT);
+           gc_required, NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1839,7 +1883,7 @@
                                         Label* gc_required) {
   // Allocate heap number in new space.
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1851,12 +1895,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1870,7 +1910,7 @@
                                           Label* gc_required) {
   // Allocate heap number in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1884,7 +1924,7 @@
                                                  Label* gc_required) {
   // Allocate heap number in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1900,7 +1940,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index be11f66..91c68d6 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -19,18 +19,16 @@
 const Register kReturnRegister2 = {Register::kCode_edi};
 const Register kJSFunctionRegister = {Register::kCode_edi};
 const Register kContextRegister = {Register::kCode_esi};
+const Register kAllocateSizeRegister = {Register::kCode_edx};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
 const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
 const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
 const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
 const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
 
-// Spill slots used by interpreter dispatch calling convention.
-const int kInterpreterDispatchTableSpillSlot = -1;
-
 // Convenience for platform-independent signatures.  We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
@@ -511,6 +509,23 @@
     j(not_zero, not_smi_label, distance);
   }
 
+  // Jump if the value cannot be represented by a smi.
+  inline void JumpIfNotValidSmiValue(Register value, Register scratch,
+                                     Label* on_invalid,
+                                     Label::Distance distance = Label::kFar) {
+    mov(scratch, value);
+    add(scratch, Immediate(0x40000000U));
+    j(sign, on_invalid, distance);
+  }
+
+  // Jump if the unsigned integer value cannot be represented by a smi.
+  inline void JumpIfUIntNotValidSmiValue(
+      Register value, Label* on_invalid,
+      Label::Distance distance = Label::kFar) {
+    cmp(value, Immediate(0x40000000U));
+    j(above_equal, on_invalid, distance);
+  }
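
Both helpers test the 31-bit smi range without an explicit range pair: adding 0x40000000 turns "out of range" into the sign bit for signed inputs, and a single unsigned compare covers the unsigned case. Equivalent C, as a sketch:

  // A value is smi-representable on ia32 iff -2^30 <= value < 2^30.
  static inline bool IsValidSmiValue(int32_t value) {
    return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
  }
  static inline bool IsValidUIntSmiValue(uint32_t value) {
    return value < 0x40000000u;
  }
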
+
   void LoadInstanceDescriptors(Register map, Register descriptors);
   void EnumLength(Register dst, Register map);
   void NumberOfOwnDescriptors(Register dst, Register map);
@@ -566,6 +581,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
@@ -621,6 +640,14 @@
   void Allocate(Register object_size, Register result, Register result_end,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
+  // FastAllocate is currently used only for folded allocations. It just
+  // increments the top pointer without checking against the limit. This can
+  // only be done if it was proved earlier that the allocation will succeed.
+  void FastAllocate(int object_size, Register result, Register result_end,
+                    AllocationFlags flags);
+  void FastAllocate(Register object_size, Register result, Register result_end,
+                    AllocationFlags flags);
+
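+
The declarations above are the second half of the ALLOCATION_FOLDING_DOMINATOR protocol added to Allocate in the .cc: the dominator checks the limit for the combined size but leaves the top pointer alone, and each folded allocation then bumps top unconditionally. A hypothetical usage sketch (sizes and registers illustrative only):

  // Dominating allocation reserves kSizeA + kSizeB against the limit.
  __ Allocate(kSizeA + kSizeB, eax, ebx, ecx, &gc_required,
              ALLOCATION_FOLDING_DOMINATOR);
  // Folded allocations just advance top; they can no longer fail.
  __ FastAllocate(kSizeA, eax, ebx, NO_ALLOCATION_FLAGS);
  __ FastAllocate(kSizeB, edx, ebx, NO_ALLOCATION_FLAGS);
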
   // Allocate a heap number in new space with undefined value. The
   // register scratch2 can be passed as no_reg; the others must be
   // valid registers. Returns tagged pointer in result register, or
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index a3f23d3..ed26a4e 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -597,57 +597,6 @@
   __ Ret();
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  __ push(receiver());
-  // Push data from AccessorInfo.
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ Move(scratch2(), data);
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-  }
-  __ push(scratch2());
-  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
-  __ Push(scratch2(), scratch2());
-  __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  __ Push(scratch2(), reg);
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
-  __ push(name());
-
-  // Abi for CallApiGetter
-  Register getter_address_reg = ApiGetterDescriptor::function_address();
-
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ mov(getter_address_reg, Operand(ref));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -744,7 +693,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -784,7 +733,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index 14ed8b4..b0bcc88 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -415,8 +415,8 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ mov(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, r4, r5, r6, r9);
   // Cache miss.
@@ -626,10 +626,10 @@
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
   __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &slow);
   // Check if the object is a JS array or not.
   __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -682,10 +682,10 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ mov(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
-      masm, Code::STORE_IC, flags, receiver, key, r5, temporary2, r6, r9);
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, r5, temporary2, r6, r9);
   // Cache miss.
   __ b(&miss);
 
@@ -743,11 +743,11 @@
   DCHECK(StoreDescriptor::ValueRegister().is(r0));
 
   // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
 
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, r3, r4, r5, r6);
+                                               receiver, name, r5, r6, r7, r8);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
diff --git a/src/ic/arm/stub-cache-arm.cc b/src/ic/arm/stub-cache-arm.cc
index 86710eb..db8be30 100644
--- a/src/ic/arm/stub-cache-arm.cc
+++ b/src/ic/arm/stub-cache-arm.cc
@@ -14,10 +14,9 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Kind ic_kind, Code::Flags flags,
-                       StubCache::Table table, Register receiver, Register name,
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
                        Register offset_scratch) {
@@ -103,9 +102,6 @@
   // entry size being 12.
   DCHECK(sizeof(Entry) == 12);
 
-  // Make sure the flags does not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Make sure that there are no register conflicts.
   DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
@@ -154,8 +150,8 @@
   __ and_(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -164,8 +160,8 @@
   __ and_(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index a704492..84c0397 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -382,7 +382,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -632,56 +632,6 @@
   __ Ret();
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  __ Push(receiver());
-
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ Mov(scratch3(), Operand(data));
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch3(), cell);
-  }
-  __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
-  __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg);
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
-  __ Push(name());
-
-  // Abi for CallApiGetter.
-  Register getter_address_reg = x2;
-
-  // Set up the call.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ Mov(getter_address_reg, ref);
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
@@ -782,7 +732,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index 726a68e..3fd384e 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -394,8 +394,8 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ Mov(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, scratch1,
                                                scratch2, scratch3, scratch4);
@@ -622,11 +622,10 @@
   __ JumpIfSmi(receiver, &slow);
   __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
 
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ TestAndBranchIfAnySet(
-      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+  __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);
 
   // Check if the object is a JS array or not.
   Register instance_type = x10;
@@ -673,10 +672,10 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ Mov(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, x5, x6, x7, x8);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, x5, x6, x7, x8);
   // Cache miss.
   __ B(&miss);
 
@@ -729,14 +728,14 @@
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x3, x4,
-                     x5, x6));
+  DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x5, x6,
+                     x7, x8));
 
   // Probe the stub cache.
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, x3, x4, x5, x6);
+                                               receiver, name, x5, x6, x7, x8);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
diff --git a/src/ic/arm64/stub-cache-arm64.cc b/src/ic/arm64/stub-cache-arm64.cc
index eb82f2a..8d378ce 100644
--- a/src/ic/arm64/stub-cache-arm64.cc
+++ b/src/ic/arm64/stub-cache-arm64.cc
@@ -23,10 +23,9 @@
 //
 // 'receiver', 'name' and 'offset' registers are preserved on miss.
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Kind ic_kind, Code::Flags flags,
-                       StubCache::Table table, Register receiver, Register name,
-                       Register offset, Register scratch, Register scratch2,
-                       Register scratch3) {
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name, Register offset,
+                       Register scratch, Register scratch2, Register scratch3) {
   // Some code below relies on the fact that the Entry struct contains
   // 3 pointers (name, code, map).
   STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
@@ -95,9 +94,6 @@
   Isolate* isolate = masm->isolate();
   Label miss;
 
-  // Make sure the flags does not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Make sure that there are no register conflicts.
   DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
@@ -140,8 +136,8 @@
           CountTrailingZeros(kPrimaryTableSize, 64));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Primary miss: Compute hash for secondary table.
   __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -149,8 +145,8 @@
   __ And(scratch, scratch, kSecondaryTableSize - 1);
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
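The GenerateProbe/ProbeTable hunks above drop the now-unused ic_kind parameter but keep the two-level probe structure. A rough self-contained model of that structure (the hash functions, table sizes and element types here are invented stand-ins, not the real ones from src/ic/stub-cache.h):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Pointers stand in for Name*/Map*, int for Code*.
    struct Entry { const void* name; const void* map; int code; };

    constexpr std::size_t kPrimarySize = 2048;
    constexpr std::size_t kSecondarySize = 512;

    struct StubCacheModel {
      std::array<Entry, kPrimarySize> primary{};
      std::array<Entry, kSecondarySize> secondary{};

      static std::size_t PrimaryHash(const void* name, const void* map) {
        return (reinterpret_cast<std::uintptr_t>(name) ^
                reinterpret_cast<std::uintptr_t>(map)) & (kPrimarySize - 1);
      }
      static std::size_t SecondaryHash(const void* name, const void* map) {
        return (reinterpret_cast<std::uintptr_t>(name) -
                reinterpret_cast<std::uintptr_t>(map)) & (kSecondarySize - 1);
      }

      // Returns a handler id or -1; the generated assembly instead
      // falls through to the runtime on a double miss.
      int Probe(const void* name, const void* map) const {
        const Entry& p = primary[PrimaryHash(name, map)];
        if (p.name == name && p.map == map) return p.code;
        const Entry& s = secondary[SecondaryHash(name, map)];
        if (s.name == name && s.map == map) return s.code;
        return -1;
      }
    };

The second, differently-hashed table keeps a victim entry reachable after a primary-slot collision; a miss in both tables falls through to the runtime, as the comment above says.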
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index 714888c..2c8c092 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -14,16 +14,14 @@
 namespace v8 {
 namespace internal {
 
-
 Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
                                            Handle<Map> stub_holder,
                                            Code::Kind kind,
-                                           CacheHolderFlag cache_holder,
-                                           Code::StubType type) {
-  Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
-  Object* probe = stub_holder->FindInCodeCache(*name, flags);
-  if (probe->IsCode()) return handle(Code::cast(probe));
-  return Handle<Code>::null();
+                                           CacheHolderFlag cache_holder) {
+  Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder);
+  Code* code = stub_holder->LookupInCodeCache(*name, flags);
+  if (code == nullptr) return Handle<Code>();
+  return handle(code);
 }
 
 
@@ -66,9 +64,10 @@
   // Compile the stub that is either shared for all names or
   // name specific if there are global objects involved.
   Handle<Code> handler = PropertyHandlerCompiler::Find(
-      cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
+      cache_name, stub_holder_map, Code::LOAD_IC, flag);
   if (!handler.is_null()) return handler;
 
+  TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
   NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
   handler = compiler.CompileLoadNonexistent(cache_name);
   Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
@@ -77,9 +76,8 @@
 
 
 Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
-                                              Code::StubType type,
                                               Handle<Name> name) {
-  Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
+  Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder());
   Handle<Code> code = GetCodeWithFlags(flags, name);
   PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG,
                                      AbstractCode::cast(*code), *name));
@@ -194,7 +192,7 @@
   __ Move(receiver(), reg);
   LoadFieldStub stub(isolate(), field);
   GenerateTailCall(masm(), stub.GetCode());
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -204,7 +202,7 @@
   __ Move(receiver(), reg);
   LoadConstantStub stub(isolate(), constant_index);
   GenerateTailCall(masm(), stub.GetCode());
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -221,7 +219,7 @@
   }
   GenerateLoadConstant(isolate()->factory()->undefined_value());
   FrontendFooter(name, &miss);
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -229,7 +227,7 @@
     Handle<Name> name, Handle<AccessorInfo> callback) {
   Register reg = Frontend(name);
   GenerateLoadCallback(reg, callback);
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -240,7 +238,7 @@
   Register holder = Frontend(name);
   GenerateApiAccessorCall(masm(), call_optimization, map(), receiver(),
                           scratch2(), false, no_reg, holder, accessor_index);
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -358,9 +356,21 @@
   } else {
     GenerateLoadInterceptor(reg);
   }
-  return GetCode(kind(), Code::FAST, it->name());
+  return GetCode(kind(), it->name());
 }
 
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(receiver().is(ApiGetterDescriptor::ReceiverRegister()));
+  __ Move(ApiGetterDescriptor::HolderRegister(), reg);
+  // The callback is alive if this instruction is executed,
+  // so the weak cell is not cleared and points to data.
+  Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+  __ GetWeakValue(ApiGetterDescriptor::CallbackRegister(), cell);
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
 
 void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
     LookupIterator* it, Register interceptor_reg) {
@@ -416,7 +426,7 @@
   Register holder = Frontend(name);
   GenerateLoadViaGetter(masm(), map(), receiver(), holder, accessor_index,
                         expected_arguments, scratch2());
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -506,7 +516,7 @@
   PopVectorAndSlot();
   TailCallBuiltin(masm(), MissBuiltin(kind()));
 
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
@@ -534,7 +544,7 @@
   __ bind(&miss);
   if (need_save_restore) PopVectorAndSlot();
   TailCallBuiltin(masm(), MissBuiltin(kind()));
-  return GetCode(kind(), Code::FAST, it->name());
+  return GetCode(kind(), it->name());
 }
 
 
@@ -545,7 +555,7 @@
   GenerateStoreViaSetter(masm(), map(), receiver(), holder, accessor_index,
                          expected_arguments, scratch2());
 
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -556,7 +566,7 @@
   GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
                           receiver(), scratch2(), true, value(), holder,
                           accessor_index);
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
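The shared GenerateLoadCallback added above replaces the per-architecture versions (the arm64 one is deleted earlier in this patch, the ia32 one below): instead of hand-building the callback arguments, it passes the whole AccessorInfo through a weak cell in a descriptor-specified register. A loose portable-C++ analogy for the "weak cell is not cleared" invariant (std::weak_ptr standing in for WeakCell; V8's GC does not actually work this way):

    #include <cassert>
    #include <memory>

    struct AccessorInfoModel { int data; };

    struct HandlerModel {
      // Stands in for the WeakCell embedded in the compiled handler.
      std::weak_ptr<AccessorInfoModel> cell;
      int Run() const {
        // If the handler can still run, its AccessorInfo is still
        // alive, so locking the weak reference must succeed.
        std::shared_ptr<AccessorInfoModel> info = cell.lock();
        assert(info && "handler ran after its AccessorInfo died");
        return info->data;
      }
    };

    int main() {
      auto info = std::make_shared<AccessorInfoModel>(AccessorInfoModel{42});
      HandlerModel handler{info};
      return handler.Run() == 42 ? 0 : 1;
    }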
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index 76036a2..e34cd68 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -19,7 +19,7 @@
 class PropertyHandlerCompiler : public PropertyAccessCompiler {
  public:
   static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind,
-                           CacheHolderFlag cache_holder, Code::StubType type);
+                           CacheHolderFlag cache_holder);
 
  protected:
   PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind, Handle<Map> map,
@@ -98,7 +98,7 @@
                            Handle<Name> name, Label* miss,
                            PrototypeCheckType check, ReturnHolder return_what);
 
-  Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
+  Handle<Code> GetCode(Code::Kind kind, Handle<Name> name);
   void set_holder(Handle<JSObject> holder) { holder_ = holder; }
   Handle<Map> map() const { return map_; }
   void set_map(Handle<Map> map) { map_ = map; }
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index 132090d..37ab66d 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -594,58 +594,6 @@
 }
 
 
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), reg));
-
-  // Insert additional parameters into the stack frame above return address.
-  __ pop(scratch3());  // Get return address to place it below.
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  __ push(receiver());  // receiver
-  // Push data from AccessorInfo.
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ push(Immediate(data));
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-    __ push(scratch2());
-  }
-  __ push(Immediate(isolate()->factory()->undefined_value()));  // ReturnValue
-  // ReturnValue default value
-  __ push(Immediate(isolate()->factory()->undefined_value()));
-  __ push(Immediate(reinterpret_cast<int>(isolate())));
-  __ push(reg);  // holder
-  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
-
-  __ push(name());  // name
-  __ push(scratch3());  // Restore return address.
-
-  // Abi for CallApiGetter
-  Register getter_address = ApiGetterDescriptor::function_address();
-  Address function_address = v8::ToCData<Address>(callback->getter());
-  __ mov(getter_address, Immediate(function_address));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
   // Return the constant value.
   __ LoadObject(eax, value);
@@ -759,7 +707,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -801,7 +749,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 
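For reference, the args_ array that the deleted ia32 and arm64 stubs built push-by-push has the layout pinned down by their STATIC_ASSERTs. A sketch of that layout as plain data (Slot is an invented stand-in for a tagged V8 value):

    #include <array>

    // Indices mirror the STATIC_ASSERTs in the deleted stubs.
    enum ArgIndex : int {
      kShouldThrowOnErrorIndex = 0,
      kHolderIndex = 1,
      kIsolateIndex = 2,
      kReturnValueDefaultValueIndex = 3,
      kReturnValueOffset = 4,
      kDataIndex = 5,
      kThisIndex = 6,
      kArgsLength = 7,
    };

    using Slot = const void*;

    std::array<Slot, kArgsLength> BuildArgs(Slot receiver, Slot holder,
                                            Slot isolate, Slot data,
                                            Slot undefined) {
      std::array<Slot, kArgsLength> args{};
      args[kShouldThrowOnErrorIndex] = nullptr;  // Smi::FromInt(0) -> false
      args[kHolderIndex] = holder;
      args[kIsolateIndex] = isolate;
      args[kReturnValueDefaultValueIndex] = undefined;
      args[kReturnValueOffset] = undefined;
      args[kDataIndex] = data;
      args[kThisIndex] = receiver;
      return args;
    }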
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index e66716f..d32e1c3 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -336,8 +336,8 @@
   __ push(Immediate(Smi::FromInt(slot)));
   __ push(Immediate(dummy_vector));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, ebx, edi);
 
@@ -519,10 +519,10 @@
   __ JumpIfSmi(receiver, &slow);
   // Get the map from the receiver.
   __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow);
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &maybe_name_key);
@@ -563,10 +563,10 @@
   __ push(Immediate(Smi::FromInt(slot)));
   __ push(Immediate(dummy_vector));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, edi, no_reg);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, edi, no_reg);
 
   __ pop(VectorStoreICDescriptor::VectorRegister());
   __ pop(VectorStoreICDescriptor::SlotRegister());
diff --git a/src/ic/ia32/stub-cache-ia32.cc b/src/ic/ia32/stub-cache-ia32.cc
index fcfae4b..5a32858 100644
--- a/src/ic/ia32/stub-cache-ia32.cc
+++ b/src/ic/ia32/stub-cache-ia32.cc
@@ -153,9 +153,6 @@
   // being 12.
   DCHECK(sizeof(Entry) == 12);
 
-  // Assert the flags do not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Assert that there are no register conflicts.
   DCHECK(!scratch.is(receiver));
   DCHECK(!scratch.is(name));
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
index 9f1d87a..e89cb4b 100644
--- a/src/ic/ic-compiler.cc
+++ b/src/ic/ic-compiler.cc
@@ -13,26 +13,6 @@
 namespace internal {
 
 
-Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
-                                      Handle<Map> stub_holder, Code::Kind kind,
-                                      ExtraICState extra_state,
-                                      CacheHolderFlag cache_holder) {
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(kind, extra_state, cache_holder);
-  Object* probe = stub_holder->FindInCodeCache(*name, flags);
-  if (probe->IsCode()) return handle(Code::cast(probe));
-  return Handle<Code>::null();
-}
-
-
-bool PropertyICCompiler::IncludesNumberMap(MapHandleList* maps) {
-  for (int i = 0; i < maps->length(); ++i) {
-    if (maps->at(i)->instance_type() == HEAP_NUMBER_TYPE) return true;
-  }
-  return false;
-}
-
-
 Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
     Handle<Map> receiver_map, ExtraICState extra_ic_state) {
   Isolate* isolate = receiver_map->GetIsolate();
@@ -46,179 +26,68 @@
       *receiver_map == isolate->get_initial_js_array_map(elements_kind);
   Handle<Code> stub;
   if (receiver_map->has_indexed_interceptor()) {
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadIndexedInterceptorStub);
     stub = LoadIndexedInterceptorStub(isolate).GetCode();
   } else if (receiver_map->IsStringMap()) {
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadIndexedStringStub);
     stub = LoadIndexedStringStub(isolate).GetCode();
   } else if (receiver_map->has_sloppy_arguments_elements()) {
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_KeyedLoadSloppyArgumentsStub);
     stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
   } else if (receiver_map->has_fast_elements() ||
              receiver_map->has_fixed_typed_array_elements()) {
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
     stub = LoadFastElementStub(isolate, is_js_array, elements_kind,
                                convert_hole_to_undefined).GetCode();
   } else {
     DCHECK(receiver_map->has_dictionary_elements());
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
     stub = LoadDictionaryElementStub(isolate, LoadICState(extra_ic_state))
                .GetCode();
   }
   return stub;
 }
 
-
 Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-    Handle<Map> receiver_map, LanguageMode language_mode,
-    KeyedAccessStoreMode store_mode) {
+    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
   Isolate* isolate = receiver_map->GetIsolate();
-  ExtraICState extra_state =
-      KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
 
   DCHECK(store_mode == STANDARD_STORE ||
          store_mode == STORE_AND_GROW_NO_TRANSITION ||
          store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
          store_mode == STORE_NO_TRANSITION_HANDLE_COW);
 
-  PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+  PropertyICCompiler compiler(isolate);
   Handle<Code> code =
       compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
   return code;
 }
 
-
-Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
-                                             ExtraICState state) {
-  Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
-  UnseededNumberDictionary* dictionary =
-      isolate->heap()->non_monomorphic_cache();
-  int entry = dictionary->FindEntry(isolate, flags);
-  DCHECK(entry != -1);
-  Object* code = dictionary->ValueAt(entry);
-  // This might be called during the marking phase of the collector
-  // hence the unchecked cast.
-  return reinterpret_cast<Code*>(code);
-}
-
-
-static void FillCache(Isolate* isolate, Handle<Code> code) {
-  Handle<UnseededNumberDictionary> dictionary = UnseededNumberDictionary::Set(
-      isolate->factory()->non_monomorphic_cache(), code->flags(), code);
-  isolate->heap()->SetRootNonMonomorphicCache(*dictionary);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
-                                              InlineCacheState ic_state,
-                                              ExtraICState extra_state) {
-  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
-  Handle<UnseededNumberDictionary> cache =
-      isolate->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  PropertyICCompiler compiler(isolate, Code::STORE_IC);
-  Handle<Code> code;
-  if (ic_state == UNINITIALIZED) {
-    code = compiler.CompileStoreInitialize(flags);
-  } else if (ic_state == PREMONOMORPHIC) {
-    code = compiler.CompileStorePreMonomorphic(flags);
-  } else if (ic_state == GENERIC) {
-    code = compiler.CompileStoreGeneric(flags);
-  } else if (ic_state == MEGAMORPHIC) {
-    code = compiler.CompileStoreMegamorphic(flags);
-  } else {
-    UNREACHABLE();
-  }
-
-  FillCache(isolate, code);
-  return code;
-}
-
-
 void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
     MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-    CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
-    LanguageMode language_mode) {
+    CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
   Isolate* isolate = receiver_maps->at(0)->GetIsolate();
   DCHECK(store_mode == STANDARD_STORE ||
          store_mode == STORE_AND_GROW_NO_TRANSITION ||
          store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
          store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-  ExtraICState extra_state =
-      KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
-  PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+  PropertyICCompiler compiler(isolate);
   compiler.CompileKeyedStorePolymorphicHandlers(
       receiver_maps, transitioned_maps, handlers, store_mode);
 }
 
 
-Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
-  LoadIC::GenerateInitialize(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
-  PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG,
-                                     AbstractCode::cast(*code), 0));
-  return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
-  StoreIC::GenerateInitialize(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG,
-                                     AbstractCode::cast(*code), 0));
-  return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
-  StoreIC::GeneratePreMonomorphic(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG,
-                                     AbstractCode::cast(*code), 0));
-  return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
-  ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
-  LanguageMode language_mode = StoreICState::GetLanguageMode(extra_state);
-  GenerateRuntimeSetProperty(masm(), language_mode);
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG,
-                                     AbstractCode::cast(*code), 0));
-  return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
-  StoreIC::GenerateMegamorphic(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG,
-                                     AbstractCode::cast(*code), 0));
-  return code;
-}
-
-
-Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
-                                         Handle<Name> name,
-                                         InlineCacheState state) {
-  Code::Flags flags =
-      Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
-  Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(),
-          CodeCreateEvent(log_kind(code), AbstractCode::cast(*code), *name));
-#ifdef DEBUG
-  code->VerifyEmbeddedObjects();
-#endif
-  return code;
-}
-
-
 void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
     MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
     CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
   for (int i = 0; i < receiver_maps->length(); ++i) {
     Handle<Map> receiver_map(receiver_maps->at(i));
     Handle<Code> cached_stub;
-    Handle<Map> transitioned_map =
-        Map::FindTransitionedMap(receiver_map, receiver_maps);
+    Handle<Map> transitioned_map;
+    {
+      Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
+      if (tmap != nullptr) transitioned_map = handle(tmap);
+    }
 
     // TODO(mvstanton): The code below is doing pessimistic elements
     // transitions. I would like to stop doing that and rely on Allocation Site
@@ -265,34 +134,21 @@
   bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
   Handle<Code> stub;
   if (receiver_map->has_sloppy_arguments_elements()) {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
     stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
   } else if (receiver_map->has_fast_elements() ||
              receiver_map->has_fixed_typed_array_elements()) {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
     stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
                                 store_mode).GetCode();
   } else {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
     stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
   }
   return stub;
 }
 
 
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
-    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
-  Handle<Code> stub =
-      CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
-
-  Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-
-  __ DispatchWeakMap(receiver(), scratch1(), scratch2(), cell, stub,
-                     DO_SMI_CHECK);
-
-  TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
-
-  return GetCode(kind(), Code::NORMAL, factory()->empty_string());
-}
-
-
 #undef __
 }  // namespace internal
 }  // namespace v8
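The CompileKeyedStorePolymorphicHandlers hunk above swaps Map::FindTransitionedMap for Map::FindElementsKindTransitionedMap plus an explicit null check. A toy model of the lookup's shape (the real elements-kind lattice in src/elements-kind.h is richer and only partially ordered; the linear order below is invented):

    #include <vector>

    // Toy order: a larger enum value is a "more general" elements kind.
    enum class ElementsKindModel { kPackedSmi, kPackedDouble, kPacked, kDictionary };

    // Returns the least general kind among |candidates| that |current|
    // can transition to, or |current| itself when there is none
    // (mirroring the tmap != nullptr check above).
    ElementsKindModel FindTransitionedKind(
        ElementsKindModel current,
        const std::vector<ElementsKindModel>& candidates) {
      ElementsKindModel best = current;
      for (ElementsKindModel k : candidates) {
        if (k > current && (best == current || k < best)) best = k;
      }
      return best;
    }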
diff --git a/src/ic/ic-compiler.h b/src/ic/ic-compiler.h
index 3a5aecc..9d8884f 100644
--- a/src/ic/ic-compiler.h
+++ b/src/ic/ic-compiler.h
@@ -13,25 +13,15 @@
 
 class PropertyICCompiler : public PropertyAccessCompiler {
  public:
-  // Finds the Code object stored in the Heap::non_monomorphic_cache().
-  static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
-                                  ExtraICState extra_ic_state);
-
-  // Named
-  static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
-                                   ExtraICState extra_state);
-
   // Keyed
   static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
       Handle<Map> receiver_map, ExtraICState extra_ic_state);
 
   static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
-      Handle<Map> receiver_map, LanguageMode language_mode,
-      KeyedAccessStoreMode store_mode);
+      Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
   static void ComputeKeyedStorePolymorphicHandlers(
       MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-      CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
-      LanguageMode language_mode);
+      CodeHandleList* handlers, KeyedAccessStoreMode store_mode);
 
   // Helpers
   // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
@@ -41,57 +31,16 @@
 
 
  private:
-  PropertyICCompiler(Isolate* isolate, Code::Kind kind,
-                     ExtraICState extra_ic_state = kNoExtraICState,
-                     CacheHolderFlag cache_holder = kCacheOnReceiver)
-      : PropertyAccessCompiler(isolate, kind, cache_holder),
-        extra_ic_state_(extra_ic_state) {}
-
-  static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
-                           Code::Kind kind,
-                           ExtraICState extra_ic_state = kNoExtraICState,
-                           CacheHolderFlag cache_holder = kCacheOnReceiver);
-
-  Handle<Code> CompileLoadInitialize(Code::Flags flags);
-  Handle<Code> CompileStoreInitialize(Code::Flags flags);
-  Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
-  Handle<Code> CompileStoreGeneric(Code::Flags flags);
-  Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
+  explicit PropertyICCompiler(Isolate* isolate)
+      : PropertyAccessCompiler(isolate, Code::KEYED_STORE_IC,
+                               kCacheOnReceiver) {}
 
   Handle<Code> CompileKeyedStoreMonomorphicHandler(
       Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
-  Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
-                                            KeyedAccessStoreMode store_mode);
   void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
                                             MapHandleList* transitioned_maps,
                                             CodeHandleList* handlers,
                                             KeyedAccessStoreMode store_mode);
-
-  bool IncludesNumberMap(MapHandleList* maps);
-
-  Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
-                       InlineCacheState state = MONOMORPHIC);
-
-  Logger::LogEventsAndTags log_kind(Handle<Code> code) {
-    if (kind() == Code::LOAD_IC) {
-      return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG
-                                             : Logger::LOAD_POLYMORPHIC_IC_TAG;
-    } else if (kind() == Code::KEYED_LOAD_IC) {
-      return code->ic_state() == MONOMORPHIC
-                 ? Logger::KEYED_LOAD_IC_TAG
-                 : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
-    } else if (kind() == Code::STORE_IC) {
-      return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG
-                                             : Logger::STORE_POLYMORPHIC_IC_TAG;
-    } else {
-      DCHECK_EQ(Code::KEYED_STORE_IC, kind());
-      return code->ic_state() == MONOMORPHIC
-                 ? Logger::KEYED_STORE_IC_TAG
-                 : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
-    }
-  }
-
-  const ExtraICState extra_ic_state_;
 };
 
 
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
index 998bd8c..f77c40a 100644
--- a/src/ic/ic-inl.h
+++ b/src/ic/ic-inl.h
@@ -87,42 +87,12 @@
 
 void IC::set_target(Code* code) {
   SetTargetAtAddress(address(), code, constant_pool());
-  target_set_ = true;
 }
 
-
-void LoadIC::set_target(Code* code) {
-  // The contextual mode must be preserved across IC patching.
-  DCHECK(LoadICState::GetTypeofMode(code->extra_ic_state()) ==
-         LoadICState::GetTypeofMode(target()->extra_ic_state()));
-
-  IC::set_target(code);
-}
-
-
-void StoreIC::set_target(Code* code) {
-  // Language mode must be preserved across IC patching.
-  DCHECK(StoreICState::GetLanguageMode(code->extra_ic_state()) ==
-         StoreICState::GetLanguageMode(target()->extra_ic_state()));
-  IC::set_target(code);
-}
-
-
-void KeyedStoreIC::set_target(Code* code) {
-  // Language mode must be preserved across IC patching.
-  DCHECK(StoreICState::GetLanguageMode(code->extra_ic_state()) ==
-         language_mode());
-  IC::set_target(code);
-}
-
-
-Code* IC::raw_target() const {
+Code* IC::target() const {
   return GetTargetAtAddress(address(), constant_pool());
 }
 
-void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
-
-
 Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
                                       bool receiver_is_holder, Isolate* isolate,
                                       CacheHolderFlag* flag) {
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index c5835e4..2dcb8d9 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -9,7 +9,6 @@
 #include "src/api-arguments.h"
 #include "src/arguments.h"
 #include "src/base/bits.h"
-#include "src/code-factory.h"
 #include "src/codegen.h"
 #include "src/conversions.h"
 #include "src/execution.h"
@@ -38,7 +37,7 @@
       return '.';
     case MONOMORPHIC:
       return '1';
-    case PROTOTYPE_FAILURE:
+    case RECOMPUTE_HANDLER:
       return '^';
     case POLYMORPHIC:
       return 'P';
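PROTOTYPE_FAILURE is renamed RECOMPUTE_HANDLER throughout this file: the state marks "same receiver map, stale handler", which is broader than a failed prototype check. A toy transition function showing where the state sits (the real rules live in IC::PatchCache and ShouldRecomputeHandler; the transitions below are a simplified guess, and only the trace characters shown above are V8's):

    enum class IcStateModel {
      kUninitialized,     // '.'
      kPremonomorphic,
      kMonomorphic,       // '1'
      kRecomputeHandler,  // '^'
      kPolymorphic,       // 'P'
      kMegamorphic,
    };

    IcStateModel OnMiss(IcStateModel s, bool same_map_stale_handler) {
      switch (s) {
        case IcStateModel::kUninitialized:  return IcStateModel::kPremonomorphic;
        case IcStateModel::kPremonomorphic: return IcStateModel::kMonomorphic;
        case IcStateModel::kMonomorphic:
        case IcStateModel::kRecomputeHandler:
          return same_map_stale_handler ? IcStateModel::kRecomputeHandler
                                        : IcStateModel::kPolymorphic;
        case IcStateModel::kPolymorphic:    return IcStateModel::kMegamorphic;
        case IcStateModel::kMegamorphic:    return IcStateModel::kMegamorphic;
      }
      return s;
    }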
@@ -95,8 +94,8 @@
 void IC::TraceIC(const char* type, Handle<Object> name) {
   if (FLAG_trace_ic) {
     if (AddressIsDeoptimizedCode()) return;
-    State new_state =
-        UseVector() ? nexus()->StateFromFeedback() : raw_target()->ic_state();
+    DCHECK(UseVector());
+    State new_state = nexus()->StateFromFeedback();
     TraceIC(type, name, state(), new_state);
   }
 }
@@ -105,8 +104,7 @@
 void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
                  State new_state) {
   if (FLAG_trace_ic) {
-    Code* new_target = raw_target();
-    PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
+    PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
 
     // TODO(jkummerow): Add support for "apply". The logic is roughly:
     // marker = [fp_ + kMarkerOffset];
@@ -123,7 +121,7 @@
     }
 
     const char* modifier = "";
-    if (new_target->kind() == Code::KEYED_STORE_IC) {
+    if (kind() == Code::KEYED_STORE_IC) {
       KeyedAccessStoreMode mode =
           casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
       modifier = GetTransitionMarkModifier(mode);
@@ -146,7 +144,6 @@
 
 IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
     : isolate_(isolate),
-      target_set_(false),
       vector_set_(false),
       target_maps_set_(false),
       nexus_(nexus) {
@@ -185,11 +182,11 @@
     constant_pool_address_ = constant_pool;
   }
   pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
-  target_ = handle(raw_target(), isolate);
-  kind_ = target_->kind();
-  state_ = UseVector() ? nexus->StateFromFeedback() : target_->ic_state();
+  Code* target = this->target();
+  kind_ = target->kind();
+  state_ = UseVector() ? nexus->StateFromFeedback() : target->ic_state();
   old_state_ = state_;
-  extra_ic_state_ = target_->extra_ic_state();
+  extra_ic_state_ = target->extra_ic_state();
 }
 
 
@@ -258,15 +255,10 @@
   }
 }
 
-
-bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
-                                                Handle<String> name) {
-  if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
-  if (UseVector()) {
-    maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
-  } else {
-    maybe_handler_ = target()->FindHandlerForMap(*receiver_map());
-  }
+bool IC::ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name) {
+  if (!RecomputeHandlerForName(name)) return false;
+  DCHECK(UseVector());
+  maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
 
   // The current map wasn't handled yet. There's no reason to stay monomorphic,
   // *unless* we're moving from a deprecated map to its replacement, or
@@ -283,38 +275,24 @@
                                                receiver_map()->elements_kind());
   }
 
-  CacheHolderFlag flag;
-  Handle<Map> ic_holder_map(GetICCacheHolder(receiver_map(), isolate(), &flag));
-
-  DCHECK(flag != kCacheOnReceiver || receiver->IsJSObject());
-  DCHECK(flag != kCacheOnPrototype || !receiver->IsJSReceiver());
-  DCHECK(flag != kCacheOnPrototypeReceiverIsDictionary);
-
-  if (state() == MONOMORPHIC) {
-    int index = ic_holder_map->IndexInCodeCache(*name, *target());
-    if (index >= 0) {
-      ic_holder_map->RemoveFromCodeCache(*name, *target(), index);
-    }
-  }
-
   if (receiver->IsJSGlobalObject()) {
     Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
     LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
     if (it.state() == LookupIterator::ACCESS_CHECK) return false;
     if (!it.IsFound()) return false;
+    if (!it.GetHolder<JSReceiver>()->IsJSGlobalObject()) return false;
     return it.property_details().cell_type() == PropertyCellType::kConstant;
   }
 
   return true;
 }
 
-
-bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
-  if (target()->is_keyed_stub()) {
+bool IC::RecomputeHandlerForName(Handle<Object> name) {
+  if (is_keyed()) {
     // Determine whether the failure is due to a name failure.
     if (!name->IsName()) return false;
-    Name* stub_name =
-        UseVector() ? nexus()->FindFirstName() : target()->FindFirstName();
+    DCHECK(UseVector());
+    Name* stub_name = nexus()->FindFirstName();
     if (*name != stub_name) return false;
   }
 
@@ -331,10 +309,8 @@
   // Remove the target from the code cache if it became invalid
   // because of changes in the prototype chain to avoid hitting it
   // again.
-  if (TryRemoveInvalidPrototypeDependentStub(receiver,
-                                             Handle<String>::cast(name))) {
-    MarkPrototypeFailure(name);
-    return;
+  if (ShouldRecomputeHandler(receiver, Handle<String>::cast(name))) {
+    MarkRecomputeHandler(name);
   }
 }
 
@@ -382,43 +358,12 @@
         *polymorphic_delta = 1;
       }
       break;
-    case PROTOTYPE_FAILURE:
+    case RECOMPUTE_HANDLER:
     case DEBUG_STUB:
       UNREACHABLE();
   }
 }
 
-
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
-                               State old_state, State new_state,
-                               bool target_remains_ic_stub) {
-  Code* host =
-      isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
-  if (host->kind() != Code::FUNCTION) return;
-
-  if (FLAG_type_info_threshold > 0 && target_remains_ic_stub &&
-      // Not all Code objects have TypeFeedbackInfo.
-      host->type_feedback_info()->IsTypeFeedbackInfo()) {
-    int polymorphic_delta = 0;  // "Polymorphic" here includes monomorphic.
-    int generic_delta = 0;      // "Generic" here includes megamorphic.
-    ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
-                              &generic_delta);
-    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
-    info->change_ic_with_type_info_count(polymorphic_delta);
-    info->change_ic_generic_count(generic_delta);
-  }
-  if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
-    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
-    info->change_own_type_change_checksum();
-  }
-  host->set_profiler_ticks(0);
-  isolate->runtime_profiler()->NotifyICChanged();
-  // TODO(2029): When an optimized function is patched, it would
-  // be nice to propagate the corresponding type information to its
-  // unoptimized version for the benefit of later inlining.
-}
-
-
 // static
 void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
   if (host->kind() != Code::FUNCTION) return;
@@ -432,49 +377,51 @@
   // unoptimized version for the benefit of later inlining.
 }
 
-
 void IC::PostPatching(Address address, Code* target, Code* old_target) {
   // Type vector based ICs update these statistics at a different time because
   // they don't always patch on state change.
   if (ICUseVector(target->kind())) return;
 
-  Isolate* isolate = target->GetHeap()->isolate();
-  State old_state = UNINITIALIZED;
-  State new_state = UNINITIALIZED;
-  bool target_remains_ic_stub = false;
-  if (old_target->is_inline_cache_stub() && target->is_inline_cache_stub()) {
-    old_state = old_target->ic_state();
-    new_state = target->ic_state();
-    target_remains_ic_stub = true;
+  DCHECK(old_target->is_inline_cache_stub());
+  DCHECK(target->is_inline_cache_stub());
+  State old_state = old_target->ic_state();
+  State new_state = target->ic_state();
+
+  Isolate* isolate = target->GetIsolate();
+  Code* host =
+      isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+  if (host->kind() != Code::FUNCTION) return;
+
+  // Not all Code objects have TypeFeedbackInfo.
+  if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
+    if (FLAG_type_info_threshold > 0) {
+      int polymorphic_delta = 0;  // "Polymorphic" here includes monomorphic.
+      int generic_delta = 0;      // "Generic" here includes megamorphic.
+      ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
+                                &generic_delta);
+      TypeFeedbackInfo* info =
+          TypeFeedbackInfo::cast(host->type_feedback_info());
+      info->change_ic_with_type_info_count(polymorphic_delta);
+      info->change_ic_generic_count(generic_delta);
+    }
+    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+    info->change_own_type_change_checksum();
   }
-
-  OnTypeFeedbackChanged(isolate, address, old_state, new_state,
-                        target_remains_ic_stub);
+  host->set_profiler_ticks(0);
+  isolate->runtime_profiler()->NotifyICChanged();
+  // TODO(2029): When an optimized function is patched, it would
+  // be nice to propagate the corresponding type information to its
+  // unoptimized version for the benefit of later inlining.
 }
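PostPatching above inlines the deleted address-based OnTypeFeedbackChanged overload rather than calling it, since the vector-based overload keeps the name. The bookkeeping it preserves is a pair of deltas ("polymorphic" counting monomorphic too, "generic" counting megamorphic too). A sketch of that counting (TypeFeedbackInfo fields modeled as plain ints; the state-to-delta mapping below is abbreviated, not the full ComputeTypeInfoCountDelta):

    struct TypeFeedbackInfoModel {
      int ic_with_type_info_count = 0;  // "polymorphic", includes monomorphic
      int ic_generic_count = 0;         // "generic", includes megamorphic
      int own_type_change_checksum = 0;
    };

    enum class S { kUninitialized, kMonomorphic, kPolymorphic, kMegamorphic };

    void OnStateChangeModel(TypeFeedbackInfoModel& info, S old_state,
                            S new_state) {
      auto typed = [](S s) {
        return s == S::kMonomorphic || s == S::kPolymorphic;
      };
      if (!typed(old_state) && typed(new_state)) info.ic_with_type_info_count++;
      if (typed(old_state) && !typed(new_state)) info.ic_with_type_info_count--;
      if (old_state != S::kMegamorphic && new_state == S::kMegamorphic)
        info.ic_generic_count++;
      info.own_type_change_checksum++;  // bumped on every patch
    }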
 
-
 void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
   Code* target = GetTargetAtAddress(address, constant_pool);
 
   // Don't clear debug break inline cache as it will remove the break point.
   if (target->is_debug_stub()) return;
 
-  switch (target->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      return;
-    case Code::COMPARE_IC:
-      return CompareIC::Clear(isolate, address, target, constant_pool);
-    case Code::CALL_IC:  // CallICs are vector-based and cleared differently.
-    case Code::BINARY_OP_IC:
-    case Code::TO_BOOLEAN_IC:
-      // Clearing these is tricky and does not
-      // make any performance difference.
-      return;
-    default:
-      UNREACHABLE();
+  if (target->kind() == Code::COMPARE_IC) {
+    CompareIC::Clear(isolate, address, target, constant_pool);
   }
 }
 
@@ -509,15 +456,6 @@
 }
 
 
-void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
-                    Address constant_pool) {
-  if (IsCleared(target)) return;
-  Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC,
-                                                      target->extra_ic_state());
-  SetTargetAtAddress(address, code, constant_pool);
-}
-
-
 void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
   if (IsCleared(nexus)) return;
   nexus->ConfigurePremonomorphic();
@@ -525,15 +463,6 @@
 }
 
 
-void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
-                         Address constant_pool) {
-  if (IsCleared(target)) return;
-  Handle<Code> code = pre_monomorphic_stub(
-      isolate, StoreICState::GetLanguageMode(target->extra_ic_state()));
-  SetTargetAtAddress(address, *code, constant_pool);
-}
-
-
 void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
                          KeyedStoreICNexus* nexus) {
   if (IsCleared(nexus)) return;
@@ -671,8 +600,8 @@
     if (FLAG_use_ic) {
       DCHECK(UseVector());
       ConfigureVectorState(MEGAMORPHIC, name);
-      TRACE_IC("LoadIC", name);
       TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
+      TRACE_IC("LoadIC", name);
     }
     Handle<Object> result;
     ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
@@ -703,6 +632,7 @@
       }
 
       if (use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadScriptContextFieldStub);
         LoadScriptContextFieldStub stub(isolate(), &lookup_result);
         PatchCache(name, stub.GetCode());
       }
@@ -750,7 +680,7 @@
 
 bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
   if (!code->is_handler()) return false;
-  if (target()->is_keyed_stub() && state() != PROTOTYPE_FAILURE) return false;
+  if (is_keyed() && state() != RECOMPUTE_HANDLER) return false;
   Handle<Map> map = receiver_map();
   MapHandleList maps;
   CodeHandleList handlers;
@@ -783,14 +713,11 @@
   if (number_of_maps == 0 && state() != MONOMORPHIC && state() != POLYMORPHIC) {
     return false;
   }
-  if (UseVector()) {
-    if (!nexus()->FindHandlers(&handlers, maps.length())) return false;
-  } else {
-    if (!target()->FindHandlers(&handlers, maps.length())) return false;
-  }
+  DCHECK(UseVector());
+  if (!nexus()->FindHandlers(&handlers, maps.length())) return false;
 
   number_of_valid_maps++;
-  if (number_of_valid_maps > 1 && target()->is_keyed_stub()) return false;
+  if (number_of_valid_maps > 1 && is_keyed()) return false;
   Handle<Code> ic;
   if (number_of_valid_maps == 1) {
     ConfigureVectorState(name, receiver_map(), code);
@@ -808,7 +735,6 @@
     ConfigureVectorState(name, &maps, &handlers);
   }
 
-  if (!UseVector()) set_target(*ic);
   return true;
 }
 
@@ -823,7 +749,7 @@
   MapHandleList maps;
   CodeHandleList handlers;
   TargetMaps(&maps);
-  if (!target()->FindHandlers(&handlers, maps.length())) return;
+  if (!nexus()->FindHandlers(&handlers, maps.length())) return;
   for (int i = 0; i < maps.length(); i++) {
     UpdateMegamorphicCache(*maps.at(i), *name, *handlers.at(i));
   }
@@ -836,11 +762,12 @@
   ElementsKind target_elements_kind = target_map->elements_kind();
   bool more_general_transition = IsMoreGeneralElementsKindTransition(
       source_map->elements_kind(), target_elements_kind);
-  Map* transitioned_map =
-      more_general_transition
-          ? source_map->LookupElementsTransitionMap(target_elements_kind)
-          : NULL;
-
+  Map* transitioned_map = nullptr;
+  if (more_general_transition) {
+    MapHandleList map_list;
+    map_list.Add(handle(target_map));
+    transitioned_map = source_map->FindElementsKindTransitionedMap(&map_list);
+  }
   return transitioned_map == target_map;
 }
 
@@ -851,29 +778,23 @@
     case PREMONOMORPHIC:
       UpdateMonomorphicIC(code, name);
       break;
-    case PROTOTYPE_FAILURE:
+    case RECOMPUTE_HANDLER:
     case MONOMORPHIC:
     case POLYMORPHIC:
-      if (!target()->is_keyed_stub() || state() == PROTOTYPE_FAILURE) {
+      if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
         if (UpdatePolymorphicIC(name, code)) break;
         // For keyed stubs, we can't know whether old handlers were for the
         // same key.
         CopyICToMegamorphicCache(name);
       }
-      if (UseVector()) {
-        ConfigureVectorState(MEGAMORPHIC, name);
-      } else {
-        set_target(*megamorphic_stub());
-      }
+      DCHECK(UseVector());
+      ConfigureVectorState(MEGAMORPHIC, name);
     // Fall through.
     case MEGAMORPHIC:
       UpdateMegamorphicCache(*receiver_map(), *name, *code);
       // Indicate that we've handled this case.
-      if (UseVector()) {
-        vector_set_ = true;
-      } else {
-        target_set_ = true;
-      }
+      DCHECK(UseVector());
+      vector_set_ = true;
       break;
     case DEBUG_STUB:
       break;
@@ -883,25 +804,11 @@
   }
 }
 
-
-Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
-                                     ExtraICState extra_state) {
-  return LoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
-}
-
-
 Handle<Code> LoadIC::initialize_stub_in_optimized_code(
     Isolate* isolate, ExtraICState extra_state, State initialization_state) {
   return LoadICStub(isolate, LoadICState(extra_state)).GetCode();
 }
 
-
-Handle<Code> KeyedLoadIC::initialize_stub(Isolate* isolate,
-                                          ExtraICState extra_state) {
-  return KeyedLoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
-}
-
-
 Handle<Code> KeyedLoadIC::initialize_stub_in_optimized_code(
     Isolate* isolate, State initialization_state, ExtraICState extra_state) {
   if (initialization_state != MEGAMORPHIC) {
@@ -911,68 +818,27 @@
 }
 
 
-static Handle<Code> KeyedStoreICInitializeStubHelper(
-    Isolate* isolate, LanguageMode language_mode,
-    InlineCacheState initialization_state) {
-  switch (initialization_state) {
-    case UNINITIALIZED:
-      return is_strict(language_mode)
-                 ? isolate->builtins()->KeyedStoreIC_Initialize_Strict()
-                 : isolate->builtins()->KeyedStoreIC_Initialize();
-    case PREMONOMORPHIC:
-      return is_strict(language_mode)
-                 ? isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict()
-                 : isolate->builtins()->KeyedStoreIC_PreMonomorphic();
-    case MEGAMORPHIC:
-      return is_strict(language_mode)
-                 ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
-                 : isolate->builtins()->KeyedStoreIC_Megamorphic();
-    default:
-      UNREACHABLE();
-  }
-  return Handle<Code>();
-}
-
-
-Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
-                                           LanguageMode language_mode,
-                                           State initialization_state) {
-  if (initialization_state != MEGAMORPHIC) {
-    VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
-    return stub.GetCode();
-  }
-
-  return KeyedStoreICInitializeStubHelper(isolate, language_mode,
-                                          initialization_state);
-}
-
-
 Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
     Isolate* isolate, LanguageMode language_mode, State initialization_state) {
+  StoreICState state = StoreICState(language_mode);
   if (initialization_state != MEGAMORPHIC) {
-    VectorKeyedStoreICStub stub(isolate, StoreICState(language_mode));
-    return stub.GetCode();
+    return VectorKeyedStoreICStub(isolate, state).GetCode();
   }
-
-  return KeyedStoreICInitializeStubHelper(isolate, language_mode,
-                                          initialization_state);
+  return ChooseMegamorphicStub(isolate, state.GetExtraICState());
 }
 
 
 Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
                                                  ExtraICState extra_state) {
   LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
-  return KeyedStoreICInitializeStubHelper(isolate, mode, MEGAMORPHIC);
-}
-
-
-Handle<Code> LoadIC::megamorphic_stub() {
-  DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
-  return KeyedLoadIC::ChooseMegamorphicStub(isolate(), extra_ic_state());
+  return is_strict(mode)
+             ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
+             : isolate->builtins()->KeyedStoreIC_Megamorphic();
 }
 
 
 Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
+  TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
   LoadFieldStub stub(isolate(), index);
   return stub.GetCode();
 }
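The TRACE_HANDLER_STATS calls sprinkled through this file are cheap counters keyed by which handler path was taken. A minimal model of such a macro (the gating, naming and counter type here are invented, not the real TRACE_HANDLER_STATS definition):

    #include <atomic>

    struct HandlerStatsModel {
      std::atomic<long> LoadIC_LoadFieldStub{0};
      std::atomic<long> LoadIC_SlowStub{0};
    };

    // Compiles to nothing unless enabled; otherwise a relaxed counter
    // bump, cheap enough for every handler-compilation path.
    #if defined(ENABLE_HANDLER_STATS_MODEL)
    #define TRACE_HANDLER_STATS_MODEL(stats, counter) \
      (stats).counter.fetch_add(1, std::memory_order_relaxed)
    #else
    #define TRACE_HANDLER_STATS_MODEL(stats, counter) ((void)0)
    #endif

    // Usage at a call site:
    //   TRACE_HANDLER_STATS_MODEL(stats, LoadIC_LoadFieldStub);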
@@ -991,8 +857,9 @@
   } else if (accessors->IsAccessorPair()) {
     Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
                           isolate);
-    if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo())
+    if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) {
       return false;
+    }
     Handle<JSObject> holder = lookup->GetHolder<JSObject>();
     Handle<Object> receiver = lookup->GetReceiver();
     if (holder->HasFastProperties()) {
@@ -1070,23 +937,38 @@
 
 
 Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
+  // Try to find a globally shared handler stub.
+  Handle<Code> code = GetMapIndependentHandler(lookup);
+  if (!code.is_null()) return code;
+
+  // Otherwise check the map's handler cache for a map-specific handler, and
+  // compile one if the cache comes up empty.
   bool receiver_is_holder =
       lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
   CacheHolderFlag flag;
-  Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
-      receiver_map(), receiver_is_holder, isolate(), &flag);
+  Handle<Map> stub_holder_map;
+  if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+    stub_holder_map = IC::GetHandlerCacheHolder(
+        receiver_map(), receiver_is_holder, isolate(), &flag);
+  } else {
+    DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
+    // Store handlers cannot be cached on prototypes.
+    flag = kCacheOnReceiver;
+    stub_holder_map = receiver_map();
+  }
 
-  Handle<Code> code = PropertyHandlerCompiler::Find(
-      lookup->name(), stub_holder_map, kind(), flag,
-      lookup->is_dictionary_holder() ? Code::NORMAL : Code::FAST);
+  code = PropertyHandlerCompiler::Find(lookup->name(), stub_holder_map, kind(),
+                                       flag);
   // Use the cached value if it exists, and if it is different from the
   // handler that just missed.
   if (!code.is_null()) {
-    if (!maybe_handler_.is_null() &&
-        !maybe_handler_.ToHandleChecked().is_identical_to(code)) {
-      return code;
-    }
-    if (maybe_handler_.is_null()) {
+    Handle<Code> handler;
+    if (maybe_handler_.ToHandle(&handler)) {
+      if (!handler.is_identical_to(code)) {
+        TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
+        return code;
+      }
+    } else {
       // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
       // In MEGAMORPHIC case, check if the handler in the megamorphic stub
       // cache (which just missed) is different from the cached handler.
@@ -1094,8 +976,12 @@
         Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
         Code* megamorphic_cached_code =
             isolate()->stub_cache()->Get(*lookup->name(), map, code->flags());
-        if (megamorphic_cached_code != *code) return code;
+        if (megamorphic_cached_code != *code) {
+          TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
+          return code;
+        }
       } else {
+        TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
         return code;
       }
     }
@@ -1103,24 +989,13 @@
 
   code = CompileHandler(lookup, value, flag);
   DCHECK(code->is_handler());
-
-  // TODO(mvstanton): we'd only like to cache code on the map when it's custom
-  // code compiled for this map, otherwise it's already cached in the global
-  // code cache. We are also guarding against installing code with flags that
-  // don't match the desired CacheHolderFlag computed above, which would lead to
-  // invalid lookups later.
-  if (code->type() != Code::NORMAL &&
-      Code::ExtractCacheHolderFromFlags(code->flags()) == flag) {
-    Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
-  }
+  DCHECK(Code::ExtractCacheHolderFromFlags(code->flags()) == flag);
+  Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
 
   return code;
 }
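The restructured IC::ComputeHandler above is now two-phase: try a globally shared, map-independent handler first, then fall back to the per-map code cache and compile only on a double miss. A self-contained sketch of that control flow (all names and types are invented stand-ins for Handle<Code>, the (name, map) cache key and the cache calls):

    #include <map>
    #include <string>

    using Handler = int;
    constexpr Handler kNullHandler = -1;

    Handler GetMapIndependentHandlerModel(const std::string&) {
      return kNullHandler;  // pretend no shared stub applies
    }
    Handler CompileHandlerModel(const std::string&) { return 7; }

    Handler ComputeHandlerModel(std::map<std::string, Handler>& code_cache,
                                const std::string& name) {
      // Phase 1: a globally shared, map-independent handler.
      Handler code = GetMapIndependentHandlerModel(name);
      if (code != kNullHandler) return code;
      // Phase 2: the per-map code cache, filled after a compile.
      auto it = code_cache.find(name);
      if (it != code_cache.end()) return it->second;  // IC_HandlerCacheHit
      code = CompileHandlerModel(name);
      code_cache.emplace(name, code);  // Map::UpdateCodeCache
      return code;
    }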
 
-
-Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
-                                    Handle<Object> unused,
-                                    CacheHolderFlag cache_holder) {
+Handle<Code> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
   Handle<Object> receiver = lookup->GetReceiver();
   if (receiver->IsString() &&
       Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
@@ -1130,6 +1005,7 @@
 
   if (receiver->IsStringWrapper() &&
       Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
+    TRACE_HANDLER_STATS(isolate(), LoadIC_StringLengthStub);
     StringLengthStub string_length_stub(isolate());
     return string_length_stub.GetCode();
   }
@@ -1142,6 +1018,7 @@
            ->map()
            ->has_non_instance_prototype()) {
     Handle<Code> stub;
+    TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
     FunctionPrototypeStub function_prototype_stub(isolate());
     return function_prototype_stub.GetCode();
   }
@@ -1150,16 +1027,8 @@
   Handle<JSObject> holder = lookup->GetHolder<JSObject>();
   bool receiver_is_holder = receiver.is_identical_to(holder);
   switch (lookup->state()) {
-    case LookupIterator::INTERCEPTOR: {
-      DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-      NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-      // Perform a lookup behind the interceptor. Copy the LookupIterator since
-      // the original iterator will be used to fetch the value.
-      LookupIterator it = *lookup;
-      it.Next();
-      LookupForRead(&it);
-      return compiler.CompileLoadInterceptor(&it);
-    }
+    case LookupIterator::INTERCEPTOR:
+      break;  // Custom-compiled handler.
 
     case LookupIterator::ACCESSOR: {
       // Use simple field loads for some well-known callback properties.
@@ -1173,6 +1042,7 @@
       }
       if (Accessors::IsJSArrayBufferViewFieldAccessor(map, lookup->name(),
                                                       &object_offset)) {
+        TRACE_HANDLER_STATS(isolate(), LoadIC_ArrayBufferViewLoadFieldStub);
         FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
         ArrayBufferViewLoadFieldStub stub(isolate(), index);
         return stub.GetCode();
@@ -1181,62 +1051,60 @@
       if (IsCompatibleReceiver(lookup, map)) {
         Handle<Object> accessors = lookup->GetAccessors();
         if (accessors->IsAccessorPair()) {
-          if (!holder->HasFastProperties()) break;
-          // When debugging we need to go the slow path to flood the accessor.
-          if (GetSharedFunctionInfo()->HasDebugInfo()) break;
-          Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
-                                isolate());
-          CallOptimization call_optimization(getter);
-          NamedLoadHandlerCompiler compiler(isolate(), map, holder,
-                                            cache_holder);
-          if (call_optimization.is_simple_api_call()) {
-            return compiler.CompileLoadCallback(
-                lookup->name(), call_optimization, lookup->GetAccessorIndex());
+          if (!holder->HasFastProperties()) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+            return slow_stub();
           }
-          int expected_arguments = Handle<JSFunction>::cast(getter)
-                                       ->shared()
-                                       ->internal_formal_parameter_count();
-          return compiler.CompileLoadViaGetter(
-              lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
+          // When debugging, take the slow path to flood the accessor.
+          if (GetSharedFunctionInfo()->HasDebugInfo()) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+            return slow_stub();
+          }
+          break;  // Custom-compiled handler.
         } else if (accessors->IsAccessorInfo()) {
           Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
-          if (v8::ToCData<Address>(info->getter()) == 0) break;
-          if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map)) {
-            // This case should be already handled in LoadIC::UpdateCaches.
-            UNREACHABLE();
-            break;
+          if (v8::ToCData<Address>(info->getter()) == nullptr) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+            return slow_stub();
           }
-          if (!holder->HasFastProperties()) break;
-          if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
-          NamedLoadHandlerCompiler compiler(isolate(), map, holder,
-                                            cache_holder);
-          return compiler.CompileLoadCallback(lookup->name(), info);
+          // Ruled out by IsCompatibleReceiver() above.
+          DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map));
+          if (!holder->HasFastProperties()) return slow_stub();
+          if (receiver_is_holder) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
+            int index = lookup->GetAccessorIndex();
+            LoadApiGetterStub stub(isolate(), true, index);
+            return stub.GetCode();
+          }
+          if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+            return slow_stub();
+          }
+          break;  // Custom-compiled handler.
         }
       }
-      break;
+      TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+      return slow_stub();
     }
 
     case LookupIterator::DATA: {
       if (lookup->is_dictionary_holder()) {
-        if (kind() != Code::LOAD_IC) break;
+        if (kind() != Code::LOAD_IC) {
+          TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+          return slow_stub();
+        }
         if (holder->IsJSGlobalObject()) {
-          NamedLoadHandlerCompiler compiler(isolate(), map, holder,
-                                            cache_holder);
-          Handle<PropertyCell> cell = lookup->GetPropertyCell();
-          Handle<Code> code = compiler.CompileLoadGlobal(
-              cell, lookup->name(), lookup->IsConfigurable());
-          // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
-          CacheHolderFlag flag;
-          Handle<Map> stub_holder_map =
-              GetHandlerCacheHolder(map, receiver_is_holder, isolate(), &flag);
-          Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
-          return code;
+          break;  // Custom-compiled handler.
         }
         // There is only one shared stub for loading normalized
         // properties. It does not traverse the prototype chain, so the
         // property must be found in the object for the stub to be
         // applicable.
-        if (!receiver_is_holder) break;
+        if (!receiver_is_holder) {
+          TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+          return slow_stub();
+        }
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormal);
         return isolate()->builtins()->LoadIC_Normal();
       }
 
@@ -1246,22 +1114,21 @@
         if (receiver_is_holder) {
           return SimpleFieldLoad(field);
         }
-        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-        return compiler.CompileLoadField(lookup->name(), field);
+        break;  // Custom-compiled handler.
       }
 
       // -------------- Constant properties --------------
       DCHECK(lookup->property_details().type() == DATA_CONSTANT);
       if (receiver_is_holder) {
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
         LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
         return stub.GetCode();
       }
-      NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-      return compiler.CompileLoadConstant(lookup->name(),
-                                          lookup->GetConstantIndex());
+      break;  // Custom-compiled handler.
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
+      TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
       return slow_stub();
     case LookupIterator::ACCESS_CHECK:
     case LookupIterator::JSPROXY:
@@ -1270,6 +1137,133 @@
       UNREACHABLE();
   }
 
+  return Handle<Code>::null();
+}
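
The new split in one screen: GetMapIndependentHandler() returns a shared stub where one exists, and a null handle wherever a "break;  // Custom-compiled handler" was taken; only then does the caller pay for map-specific compilation and code-cache updates. A sketch with hypothetical stand-in types, not part of the patch:

#include <memory>

struct Stub {};
using StubHandle = std::shared_ptr<Stub>;  // stand-in for Handle<Code>

// Null result means "needs a custom-compiled, map-specific handler".
StubHandle GetMapIndependentHandlerSketch(bool has_shared_stub) {
  return has_shared_stub ? std::make_shared<Stub>() : nullptr;
}

StubHandle CompileHandlerSketch() { return std::make_shared<Stub>(); }

StubHandle ComputeHandlerSketch(bool has_shared_stub) {
  if (StubHandle shared = GetMapIndependentHandlerSketch(has_shared_stub)) {
    return shared;  // cheap path: no compilation, no code-cache traffic
  }
  return CompileHandlerSketch();  // expensive path, cached by the caller
}
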
+
+Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
+                                    Handle<Object> unused,
+                                    CacheHolderFlag cache_holder) {
+  Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+#ifdef DEBUG
+  // Only used by DCHECKs below.
+  Handle<Object> receiver = lookup->GetReceiver();
+  bool receiver_is_holder = receiver.is_identical_to(holder);
+#endif
+  // Non-map-specific handler stubs have already been selected.
+  DCHECK(!receiver->IsString() ||
+         !Name::Equals(isolate()->factory()->length_string(), lookup->name()));
+  DCHECK(!receiver->IsStringWrapper() ||
+         !Name::Equals(isolate()->factory()->length_string(), lookup->name()));
+
+  DCHECK(!(
+      receiver->IsJSFunction() &&
+      Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
+      receiver->IsConstructor() &&
+      !Handle<JSFunction>::cast(receiver)
+           ->map()
+           ->has_non_instance_prototype()));
+
+  Handle<Map> map = receiver_map();
+  switch (lookup->state()) {
+    case LookupIterator::INTERCEPTOR: {
+      DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadInterceptor);
+      NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+      // Perform a lookup behind the interceptor. Copy the LookupIterator since
+      // the original iterator will be used to fetch the value.
+      LookupIterator it = *lookup;
+      it.Next();
+      LookupForRead(&it);
+      return compiler.CompileLoadInterceptor(&it);
+    }
+
+    case LookupIterator::ACCESSOR: {
+#ifdef DEBUG
+      int object_offset;
+      DCHECK(!Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
+                                                 &object_offset));
+      DCHECK(!Accessors::IsJSArrayBufferViewFieldAccessor(map, lookup->name(),
+                                                          &object_offset));
+#endif
+
+      DCHECK(IsCompatibleReceiver(lookup, map));
+      Handle<Object> accessors = lookup->GetAccessors();
+      if (accessors->IsAccessorPair()) {
+        DCHECK(holder->HasFastProperties());
+        DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
+        Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+                              isolate());
+        CallOptimization call_optimization(getter);
+        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+        if (call_optimization.is_simple_api_call()) {
+          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadCallback);
+          int index = lookup->GetAccessorIndex();
+          Handle<Code> code = compiler.CompileLoadCallback(
+              lookup->name(), call_optimization, index);
+          if (FLAG_runtime_call_stats) return slow_stub();
+          return code;
+        }
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadViaGetter);
+        int expected_arguments = Handle<JSFunction>::cast(getter)
+                                     ->shared()
+                                     ->internal_formal_parameter_count();
+        return compiler.CompileLoadViaGetter(
+            lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
+      } else {
+        DCHECK(accessors->IsAccessorInfo());
+        Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+        DCHECK(v8::ToCData<Address>(info->getter()) != nullptr);
+        DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map));
+        DCHECK(holder->HasFastProperties());
+        DCHECK(!receiver_is_holder);
+        DCHECK(!info->is_sloppy() || receiver->IsJSReceiver());
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadCallback);
+        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+        Handle<Code> code = compiler.CompileLoadCallback(lookup->name(), info);
+        if (FLAG_runtime_call_stats) return slow_stub();
+        return code;
+      }
+      UNREACHABLE();
+    }
+
+    case LookupIterator::DATA: {
+      if (lookup->is_dictionary_holder()) {
+        DCHECK(kind() == Code::LOAD_IC);
+        DCHECK(holder->IsJSGlobalObject());
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
+        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+        Handle<PropertyCell> cell = lookup->GetPropertyCell();
+        Handle<Code> code = compiler.CompileLoadGlobal(
+            cell, lookup->name(), lookup->IsConfigurable());
+        return code;
+      }
+
+      // -------------- Fields --------------
+      if (lookup->property_details().type() == DATA) {
+        FieldIndex field = lookup->GetFieldIndex();
+        DCHECK(!receiver_is_holder);
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadField);
+        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+        return compiler.CompileLoadField(lookup->name(), field);
+      }
+
+      // -------------- Constant properties --------------
+      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
+      DCHECK(!receiver_is_holder);
+      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstant);
+      NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+      return compiler.CompileLoadConstant(lookup->name(),
+                                          lookup->GetConstantIndex());
+    }
+
+    case LookupIterator::INTEGER_INDEXED_EXOTIC:
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::JSPROXY:
+    case LookupIterator::NOT_FOUND:
+    case LookupIterator::TRANSITION:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
   return slow_stub();
 }
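
Note the FLAG_runtime_call_stats early returns above: the callback handler is still compiled (so tracing and caches stay warm), but execution is routed through the slow stub so the C++ getter is attributed in the runtime call stats. The shape of it, with hypothetical stand-ins rather than the real flag and stubs:

static bool g_runtime_call_stats = false;  // stand-in for the V8 flag

int CompileCallbackHandler() { return 1; }  // stand-in: fast handler id
int SlowStubId() { return 0; }              // stand-in: runtime path id

int SelectLoadCallbackHandler() {
  int fast = CompileCallbackHandler();  // compiled either way
  if (g_runtime_call_stats) return SlowStubId();  // count in the runtime
  return fast;
}
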
 
@@ -1293,11 +1287,10 @@
   return key;
 }
 
-
-Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
-  Handle<Code> null_handle;
+void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
   Handle<Map> receiver_map(receiver->map(), isolate());
-  DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE);  // Checked by caller.
+  DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE &&
+         receiver_map->instance_type() != JS_PROXY_TYPE);  // Checked by caller.
   MapHandleList target_receiver_maps;
   TargetMaps(&target_receiver_maps);
 
@@ -1305,15 +1298,19 @@
     Handle<Code> handler =
         PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
             receiver_map, extra_ic_state());
-    ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
-    return null_handle;
+    return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
   }
 
   for (int i = 0; i < target_receiver_maps.length(); i++) {
-    if (!target_receiver_maps.at(i).is_null() &&
-        target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
+    Handle<Map> map = target_receiver_maps.at(i);
+    if (map.is_null()) continue;
+    if (map->instance_type() == JS_VALUE_TYPE) {
       TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSValue");
-      return megamorphic_stub();
+      return;
+    }
+    if (map->instance_type() == JS_PROXY_TYPE) {
+      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSProxy");
+      return;
     }
   }
 
@@ -1331,8 +1328,7 @@
     Handle<Code> handler =
         PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
             receiver_map, extra_ic_state());
-    ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
-    return null_handle;
+    return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
   }
 
   DCHECK(state() != GENERIC);
@@ -1343,21 +1339,21 @@
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the generic stub.
     TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
-    return megamorphic_stub();
+    return;
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
     TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
-    return megamorphic_stub();
+    return;
   }
 
   CodeHandleList handlers(target_receiver_maps.length());
+  TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_PolymorphicElement);
   ElementHandlerCompiler compiler(isolate());
   compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
-  ConfigureVectorState(Handle<Name>::null(), &target_receiver_maps, &handlers);
-  return null_handle;
+  ConfigureVectorState(Handle<Name>(), &target_receiver_maps, &handlers);
 }
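
UpdateLoadElement now returns void: every early return leaves the feedback vector untouched, and it is the caller's !is_vector_set() check that flips the IC to MEGAMORPHIC. The map-acceptance rule, sketched standalone (the cap of 4 is assumed here to match V8's kMaxKeyedPolymorphism at the time):

#include <vector>

constexpr int kMaxKeyedPolymorphismSketch = 4;  // assumed value

// Returns false when the caller should leave the vector untouched and let
// the IC go MEGAMORPHIC: either the map was already recorded (a repeated
// miss on the same map) or the polymorphism cap is reached.
bool TryAddReceiverMap(std::vector<int>* maps, int new_map) {
  for (int m : *maps) {
    if (m == new_map) return false;  // "same map added twice"
  }
  if (static_cast<int>(maps->size()) >= kMaxKeyedPolymorphismSketch) {
    return false;  // "max polymorph exceeded"
  }
  maps->push_back(new_map);
  return true;
}
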
 
 
@@ -1372,7 +1368,6 @@
   }
 
   Handle<Object> load_handle;
-  Handle<Code> stub = megamorphic_stub();
 
   // Check for non-string values that can be converted into an
   // internalized string directly or is representable as a smi.
@@ -1386,20 +1381,15 @@
              !object->IsJSValue()) {
     if (object->IsJSObject() || (object->IsString() && key->IsNumber())) {
       Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
-      if (object->IsString() || key->IsSmi()) stub = LoadElementStub(receiver);
+      if (object->IsString() || key->IsSmi()) UpdateLoadElement(receiver);
     }
   }
 
-  DCHECK(UseVector());
-  if (!is_vector_set() || stub.is_null()) {
-    Code* generic = *megamorphic_stub();
-    if (!stub.is_null() && *stub == generic) {
-      ConfigureVectorState(MEGAMORPHIC, key);
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
-    }
-
-    TRACE_IC("LoadIC", key);
+  if (!is_vector_set()) {
+    ConfigureVectorState(MEGAMORPHIC, key);
+    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
   }
+  TRACE_IC("LoadIC", key);
 
   if (!load_handle.is_null()) return load_handle;
 
@@ -1487,11 +1477,8 @@
   if (kind() == Code::KEYED_STORE_IC && name->AsArrayIndex(&index)) {
     // Rewrite to the generic keyed store stub.
     if (FLAG_use_ic) {
-      if (UseVector()) {
-        ConfigureVectorState(MEGAMORPHIC, name);
-      } else if (!AddressIsDeoptimizedCode()) {
-        set_target(*megamorphic_stub());
-      }
+      DCHECK(UseVector());
+      ConfigureVectorState(MEGAMORPHIC, name);
       TRACE_IC("StoreIC", name);
       TRACE_GENERIC_IC(isolate(), "StoreIC", "name as array index");
     }
@@ -1529,6 +1516,7 @@
 
       if (FLAG_use_ic &&
           StoreScriptContextFieldStub::Accepted(&lookup_result)) {
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreScriptContextFieldStub);
         StoreScriptContextFieldStub stub(isolate(), &lookup_result);
         PatchCache(name, stub.GetCode());
       }
@@ -1554,17 +1542,6 @@
     return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
   }
 
-  // Observed objects are always modified through the runtime.
-  if (object->IsHeapObject() &&
-      Handle<HeapObject>::cast(object)->map()->is_observed()) {
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(), result,
-        Object::SetProperty(object, name, value, language_mode(), store_mode),
-        Object);
-    return result;
-  }
-
   LookupIterator it(object, name);
   if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
 
@@ -1573,14 +1550,6 @@
   return value;
 }
 
-Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
-                                     ConvertReceiverMode mode,
-                                     TailCallMode tail_call_mode) {
-  CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
-  Handle<Code> code = stub.GetCode();
-  return code;
-}
-
 Handle<Code> CallIC::initialize_stub_in_optimized_code(
     Isolate* isolate, int argc, ConvertReceiverMode mode,
     TailCallMode tail_call_mode) {
@@ -1590,26 +1559,6 @@
 }
 
 
-static Handle<Code> StoreICInitializeStubHelper(
-    Isolate* isolate, ExtraICState extra_state,
-    InlineCacheState initialization_state) {
-  Handle<Code> ic = PropertyICCompiler::ComputeStore(
-      isolate, initialization_state, extra_state);
-  return ic;
-}
-
-
-Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
-                                      LanguageMode language_mode,
-                                      State initialization_state) {
-  DCHECK(initialization_state == UNINITIALIZED ||
-         initialization_state == PREMONOMORPHIC ||
-         initialization_state == MEGAMORPHIC);
-  VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
-  return stub.GetCode();
-}
-
-
 Handle<Code> StoreIC::initialize_stub_in_optimized_code(
     Isolate* isolate, LanguageMode language_mode, State initialization_state) {
   DCHECK(initialization_state == UNINITIALIZED ||
@@ -1620,40 +1569,13 @@
     return stub.GetCode();
   }
 
-  return StoreICInitializeStubHelper(
-      isolate, ComputeExtraICState(language_mode), initialization_state);
+  return is_strict(language_mode)
+             ? isolate->builtins()->StoreIC_Megamorphic_Strict()
+             : isolate->builtins()->StoreIC_Megamorphic();
 }
 
-
-Handle<Code> StoreIC::megamorphic_stub() {
-  if (kind() == Code::STORE_IC) {
-    return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
-                                            extra_ic_state());
-  } else {
-    DCHECK(kind() == Code::KEYED_STORE_IC);
-    if (is_strict(language_mode())) {
-      return isolate()->builtins()->KeyedStoreIC_Megamorphic_Strict();
-    } else {
-      return isolate()->builtins()->KeyedStoreIC_Megamorphic();
-    }
-  }
-}
-
-
 Handle<Code> StoreIC::slow_stub() const {
-  if (kind() == Code::STORE_IC) {
-    return isolate()->builtins()->StoreIC_Slow();
-  } else {
-    DCHECK(kind() == Code::KEYED_STORE_IC);
-    return isolate()->builtins()->KeyedStoreIC_Slow();
-  }
-}
-
-
-Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
-                                           LanguageMode language_mode) {
-  ExtraICState state = ComputeExtraICState(language_mode);
-  return PropertyICCompiler::ComputeStore(isolate, PREMONOMORPHIC, state);
+  return isolate()->builtins()->StoreIC_Slow();
 }
 
 
@@ -1693,6 +1615,135 @@
   return code;
 }
 
+Handle<Code> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
+  DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
+
+  // This is currently guaranteed by checks in StoreIC::Store.
+  Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
+  Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+  DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
+
+  switch (lookup->state()) {
+    case LookupIterator::TRANSITION: {
+      auto store_target = lookup->GetStoreTarget();
+      if (store_target->IsJSGlobalObject()) {
+        break;  // Custom-compiled handler.
+      }
+      // Currently not handled by CompileStoreTransition.
+      if (!holder->HasFastProperties()) {
+        TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
+        TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+        return slow_stub();
+      }
+
+      DCHECK(lookup->IsCacheableTransition());
+      break;  // Custom-compiled handler.
+    }
+
+    case LookupIterator::INTERCEPTOR: {
+      DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
+      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub);
+      StoreInterceptorStub stub(isolate());
+      return stub.GetCode();
+    }
+
+    case LookupIterator::ACCESSOR: {
+      if (!holder->HasFastProperties()) {
+        TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
+        TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+        return slow_stub();
+      }
+      Handle<Object> accessors = lookup->GetAccessors();
+      if (accessors->IsAccessorInfo()) {
+        Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+        if (v8::ToCData<Address>(info->setter()) == nullptr) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == nullptr");
+          TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+          return slow_stub();
+        }
+        if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
+            !lookup->HolderIsReceiverOrHiddenPrototype()) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC",
+                           "special data property in prototype chain");
+          TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+          return slow_stub();
+        }
+        if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+                                                   receiver_map())) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
+          TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+          return slow_stub();
+        }
+        if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+          TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+          return slow_stub();
+        }
+        break;  // Custom-compiled handler.
+      } else if (accessors->IsAccessorPair()) {
+        Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
+                              isolate());
+        if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
+          TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+          return slow_stub();
+        }
+        CallOptimization call_optimization(setter);
+        if (call_optimization.is_simple_api_call()) {
+          if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
+            break;  // Custom-compiled handler.
+          }
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver");
+          TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+          return slow_stub();
+        }
+        break;  // Custom-compiled handler.
+      }
+      TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+      return slow_stub();
+    }
+
+    case LookupIterator::DATA: {
+      if (lookup->is_dictionary_holder()) {
+        if (holder->IsJSGlobalObject()) {
+          break;  // Custom-compiled handler.
+        }
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNormal);
+        DCHECK(holder.is_identical_to(receiver));
+        return isolate()->builtins()->StoreIC_Normal();
+      }
+
+      // -------------- Fields --------------
+      if (lookup->property_details().type() == DATA) {
+        bool use_stub = true;
+        if (lookup->representation().IsHeapObject()) {
+          // Only use a generic stub if no types need to be tracked.
+          Handle<FieldType> field_type = lookup->GetFieldType();
+          use_stub = !field_type->IsClass();
+        }
+        if (use_stub) {
+          TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
+          StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                              lookup->representation());
+          return stub.GetCode();
+        }
+        break;  // Custom-compiled handler.
+      }
+
+      // -------------- Constant properties --------------
+      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
+      TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
+      TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+      return slow_stub();
+    }
+
+    case LookupIterator::INTEGER_INDEXED_EXOTIC:
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::JSPROXY:
+    case LookupIterator::NOT_FOUND:
+      UNREACHABLE();
+  }
+  return Handle<Code>::null();
+}
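
In the DATA branch above, the representation decides whether the shared StoreFieldStub suffices: once a HeapObject field tracks a specific class, the handler must embed a map check and is custom-compiled. Reduced to a predicate, with a hypothetical enum standing in for V8's FieldType:

enum class FieldTypeSketch { kAny, kClass };  // kClass tracks one map

// True: the generic StoreFieldStub can be used.
// False: fall through to a custom-compiled handler with a type check.
bool UseGenericStoreField(bool is_heap_object, FieldTypeSketch type) {
  if (!is_heap_object) return true;  // Smi/double fields: nothing to track
  return type != FieldTypeSketch::kClass;
}
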
 
 Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
                                      Handle<Object> value,
@@ -1709,6 +1760,7 @@
       auto store_target = lookup->GetStoreTarget();
       if (store_target->IsJSGlobalObject()) {
         // TODO(dcarney): this currently just deopts. Use the transition cell.
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalTransition);
         auto cell = isolate()->factory()->NewPropertyCell();
         cell->set_value(*value);
         auto code = PropertyCellStoreHandler(
@@ -1719,111 +1771,94 @@
       }
       Handle<Map> transition = lookup->transition_map();
       // Currently not handled by CompileStoreTransition.
-      if (!holder->HasFastProperties()) {
-        TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
-        break;
-      }
+      DCHECK(holder->HasFastProperties());
 
       DCHECK(lookup->IsCacheableTransition());
+      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransition);
       NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
       return compiler.CompileStoreTransition(transition, lookup->name());
     }
 
-    case LookupIterator::INTERCEPTOR: {
-      DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
-      return CodeFactory::StoreInterceptor(isolate()).code();
-    }
+    case LookupIterator::INTERCEPTOR:
+      UNREACHABLE();
 
     case LookupIterator::ACCESSOR: {
-      if (!holder->HasFastProperties()) {
-        TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
-        break;
-      }
+      DCHECK(holder->HasFastProperties());
       Handle<Object> accessors = lookup->GetAccessors();
       if (accessors->IsAccessorInfo()) {
         Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
-        if (v8::ToCData<Address>(info->setter()) == 0) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
-          break;
-        }
-        if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
-            !lookup->HolderIsReceiverOrHiddenPrototype()) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC",
-                           "special data property in prototype chain");
-          break;
-        }
-        if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
-                                                   receiver_map())) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
-          break;
-        }
-        if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
+        DCHECK(v8::ToCData<Address>(info->setter()) != nullptr);
+        DCHECK(!AccessorInfo::cast(*accessors)->is_special_data_property() ||
+               lookup->HolderIsReceiverOrHiddenPrototype());
+        DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+                                                     receiver_map()));
+        DCHECK(!info->is_sloppy() || receiver->IsJSReceiver());
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-        return compiler.CompileStoreCallback(receiver, lookup->name(), info,
-                                             language_mode());
-      } else if (accessors->IsAccessorPair()) {
+        Handle<Code> code = compiler.CompileStoreCallback(
+            receiver, lookup->name(), info, language_mode());
+        if (FLAG_runtime_call_stats) return slow_stub();
+        return code;
+      } else {
+        DCHECK(accessors->IsAccessorPair());
         Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
                               isolate());
-        if (!setter->IsJSFunction()) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
-          break;
-        }
-        Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
-        CallOptimization call_optimization(function);
+        DCHECK(setter->IsJSFunction() || setter->IsFunctionTemplateInfo());
+        CallOptimization call_optimization(setter);
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-        if (call_optimization.is_simple_api_call() &&
-            call_optimization.IsCompatibleReceiver(receiver, holder)) {
-          return compiler.CompileStoreCallback(receiver, lookup->name(),
-                                               call_optimization,
-                                               lookup->GetAccessorIndex());
+        if (call_optimization.is_simple_api_call()) {
+          DCHECK(call_optimization.IsCompatibleReceiver(receiver, holder));
+          TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
+          Handle<Code> code = compiler.CompileStoreCallback(
+              receiver, lookup->name(), call_optimization,
+              lookup->GetAccessorIndex());
+          if (FLAG_runtime_call_stats) return slow_stub();
+          return code;
         }
-        int expected_arguments =
-            function->shared()->internal_formal_parameter_count();
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreViaSetter);
+        int expected_arguments = JSFunction::cast(*setter)
+                                     ->shared()
+                                     ->internal_formal_parameter_count();
         return compiler.CompileStoreViaSetter(receiver, lookup->name(),
                                               lookup->GetAccessorIndex(),
                                               expected_arguments);
       }
-      break;
     }
 
     case LookupIterator::DATA: {
       if (lookup->is_dictionary_holder()) {
-        if (holder->IsJSGlobalObject()) {
-          DCHECK(holder.is_identical_to(receiver) ||
-                 receiver->map()->prototype() == *holder);
-          auto cell = lookup->GetPropertyCell();
-          auto updated_type = PropertyCell::UpdatedType(
-              cell, value, lookup->property_details());
-          auto code = PropertyCellStoreHandler(
-              isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
-              lookup->name(), cell, updated_type);
-          return code;
-        }
-        DCHECK(holder.is_identical_to(receiver));
-        return isolate()->builtins()->StoreIC_Normal();
+        DCHECK(holder->IsJSGlobalObject());
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
+        DCHECK(holder.is_identical_to(receiver) ||
+               receiver->map()->prototype() == *holder);
+        auto cell = lookup->GetPropertyCell();
+        auto updated_type =
+            PropertyCell::UpdatedType(cell, value, lookup->property_details());
+        auto code = PropertyCellStoreHandler(
+            isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
+            lookup->name(), cell, updated_type);
+        return code;
       }
 
       // -------------- Fields --------------
       if (lookup->property_details().type() == DATA) {
+#ifdef DEBUG
         bool use_stub = true;
         if (lookup->representation().IsHeapObject()) {
           // Only use a generic stub if no types need to be tracked.
           Handle<FieldType> field_type = lookup->GetFieldType();
           use_stub = !field_type->IsClass();
         }
-        if (use_stub) {
-          StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
-                              lookup->representation());
-          return stub.GetCode();
-        }
+        DCHECK(!use_stub);
+#endif
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreField);
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
         return compiler.CompileStoreField(lookup);
       }
 
       // -------------- Constant properties --------------
       DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
-      break;
+      UNREACHABLE();
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1832,22 +1867,12 @@
     case LookupIterator::NOT_FOUND:
       UNREACHABLE();
   }
+  UNREACHABLE();
   return slow_stub();
 }
 
-
-Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
-                                            KeyedAccessStoreMode store_mode) {
-  Handle<Code> null_handle;
-  // Don't handle megamorphic property accesses for INTERCEPTORS or
-  // ACCESSOR_CONSTANT
-  // via megamorphic stubs, since they don't have a map in their relocation info
-  // and so the stubs can't be harvested for the object needed for a map check.
-  if (target()->type() != Code::NORMAL) {
-    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-NORMAL target type");
-    return megamorphic_stub();
-  }
-
+void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
+                                      KeyedAccessStoreMode store_mode) {
   MapHandleList target_receiver_maps;
   TargetMaps(&target_receiver_maps);
   if (target_receiver_maps.length() == 0) {
@@ -1855,10 +1880,17 @@
         ComputeTransitionedMap(receiver_map, store_mode);
     store_mode = GetNonTransitioningStoreMode(store_mode);
     Handle<Code> handler =
-        PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-            monomorphic_map, language_mode(), store_mode);
-    ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
-    return null_handle;
+        PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(monomorphic_map,
+                                                                store_mode);
+    return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
+  }
+
+  for (int i = 0; i < target_receiver_maps.length(); i++) {
+    if (!target_receiver_maps.at(i).is_null() &&
+        target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "JSValue");
+      return;
+    }
   }
 
   // There are several special cases where an IC that is MONOMORPHIC can still
@@ -1883,23 +1915,22 @@
       store_mode = GetNonTransitioningStoreMode(store_mode);
       Handle<Code> handler =
           PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-              transitioned_receiver_map, language_mode(), store_mode);
-      ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
-                           handler);
-      return null_handle;
-    } else if (receiver_map.is_identical_to(previous_receiver_map) &&
-               old_store_mode == STANDARD_STORE &&
-               (store_mode == STORE_AND_GROW_NO_TRANSITION ||
-                store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-                store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+              transitioned_receiver_map, store_mode);
+      ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
+      return;
+    }
+    if (receiver_map.is_identical_to(previous_receiver_map) &&
+        old_store_mode == STANDARD_STORE &&
+        (store_mode == STORE_AND_GROW_NO_TRANSITION ||
+         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+         store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
       // A "normal" IC that handles stores can switch to a version that can
       // grow at the end of the array, handle OOB accesses or copy COW arrays
       // and still stay MONOMORPHIC.
       Handle<Code> handler =
-          PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-              receiver_map, language_mode(), store_mode);
-      ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
-      return null_handle;
+          PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(receiver_map,
+                                                                  store_mode);
+      return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
     }
   }
 
@@ -1919,14 +1950,12 @@
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the megamorphic stub which can handle everything.
     TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
-    return megamorphic_stub();
+    return;
   }
 
   // If the maximum number of receiver maps has been exceeded, use the
   // megamorphic version of the IC.
-  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    return megamorphic_stub();
-  }
+  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) return;
 
   // Make sure all polymorphic handlers have the same store mode, otherwise the
   // megamorphic stub must be used.
@@ -1936,7 +1965,7 @@
       store_mode = old_store_mode;
     } else if (store_mode != old_store_mode) {
       TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
-      return megamorphic_stub();
+      return;
     }
   }
 
@@ -1954,17 +1983,16 @@
         external_arrays != target_receiver_maps.length()) {
       TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
                        "unsupported combination of external and normal arrays");
-      return megamorphic_stub();
+      return;
     }
   }
 
+  TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_Polymorphic);
   MapHandleList transitioned_maps(target_receiver_maps.length());
   CodeHandleList handlers(target_receiver_maps.length());
   PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
-      &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
-      language_mode());
+      &target_receiver_maps, &transitioned_maps, &handlers, store_mode);
   ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
-  return null_handle;
 }
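
The store-mode rule above, isolated: STANDARD_STORE is compatible with any previously recorded mode (the old, more specific mode wins), while two different non-standard modes force the megamorphic bail-out. A sketch with abbreviated, hypothetical mode names:

enum StoreModeSketch { STANDARD, GROW, IGNORE_OOB, HANDLE_COW };

// Returns false on "store mode mismatch"; the IC then stays as-is and the
// caller configures the MEGAMORPHIC state.
bool UnifyStoreModes(StoreModeSketch old_mode, StoreModeSketch* new_mode) {
  if (*new_mode == STANDARD) {
    *new_mode = old_mode;  // adopt the previously recorded mode
    return true;
  }
  return *new_mode == old_mode;
}
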
 
 
@@ -2077,7 +2105,6 @@
   key = TryConvertKey(key, isolate());
 
   Handle<Object> store_handle;
-  Handle<Code> stub = megamorphic_stub();
 
   uint32_t index;
   if ((key->IsInternalizedString() &&
@@ -2097,10 +2124,8 @@
     return store_handle;
   }
 
-  bool use_ic =
-      FLAG_use_ic && !object->IsStringWrapper() &&
-      !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy() &&
-      !(object->IsJSObject() && JSObject::cast(*object)->map()->is_observed());
+  bool use_ic = FLAG_use_ic && !object->IsStringWrapper() &&
+                !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy();
   if (use_ic && !object->IsSmi()) {
     // Don't use ICs for maps of the objects in Array's prototype chain. We
     // expect to be able to trap element sets to objects with those maps in
@@ -2148,7 +2173,7 @@
         // other non-dictionary receivers in the polymorphic case benefit
         // from fast path keyed stores.
         if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
-          stub = StoreElementStub(old_receiver_map, store_mode);
+          UpdateStoreElement(old_receiver_map, store_mode);
         } else {
           TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
                            "dictionary or proxy prototype");
@@ -2161,13 +2186,9 @@
     }
   }
 
-  if (!is_vector_set() || stub.is_null()) {
-    Code* megamorphic = *megamorphic_stub();
-    if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
-      ConfigureVectorState(MEGAMORPHIC, key);
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
-                       *stub == megamorphic ? "set generic" : "slow stub");
-    }
+  if (!is_vector_set()) {
+    ConfigureVectorState(MEGAMORPHIC, key);
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
   }
   TRACE_IC("StoreIC", key);
 
@@ -2512,7 +2533,28 @@
 MaybeHandle<Object> BinaryOpIC::Transition(
     Handle<AllocationSite> allocation_site, Handle<Object> left,
     Handle<Object> right) {
-  BinaryOpICState state(isolate(), target()->extra_ic_state());
+  BinaryOpICState state(isolate(), extra_ic_state());
+
+#ifdef V8_TARGET_ARCH_X64
+  // Crash instrumentation for crbug.com/621147.
+  uintptr_t left_raw = reinterpret_cast<uintptr_t>(*left);
+  uintptr_t hole_raw =
+      reinterpret_cast<uintptr_t>(isolate()->heap()->the_hole_value());
+  if ((hole_raw & ((1ull << 32) - 1)) == (left_raw & ((1ull << 32) - 1))) {
+    Code* c = GetCode();
+    Code::Kind kind = c->kind();
+    int instruction_size = c->instruction_size() + 2 * sizeof(Address);
+    byte* instructions = static_cast<byte*>(alloca(instruction_size));
+    Address* start = reinterpret_cast<Address*>(instructions);
+    start[0] = fp();
+    start[1] = pc();
+    for (int i = 2 * sizeof(Address); i < instruction_size; i++) {
+      instructions[i] = c->instruction_start()[i];
+    }
+    isolate()->PushStackTraceAndDie(0xBAAAAAAD, instructions, fp(),
+                                    static_cast<unsigned int>(kind));
+  }
+#endif  // V8_TARGET_ARCH_X64
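
The trigger for the crbug.com/621147 instrumentation compares only the low 32 bits of the incoming left operand against the hole value, catching pointers whose upper half was lost. The predicate on its own, as a standalone sketch:

#include <cstdint>

// True when two values agree in their low 32 bits, e.g. a 64-bit pointer
// whose upper half was truncated or clobbered.
bool Low32BitsMatch(uint64_t a, uint64_t b) {
  constexpr uint64_t kLow32 = (uint64_t{1} << 32) - 1;
  return (a & kLow32) == (b & kLow32);
}
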
 
   // Compute the actual result using the builtin for the binary operation.
   Handle<Object> result;
@@ -2576,16 +2618,12 @@
     return result;
   }
 
-  // Execution::Call can execute arbitrary JavaScript, hence potentially
-  // update the state of this very IC, so we must update the stored state.
-  UpdateTarget();
-
   // Compute the new state.
   BinaryOpICState old_state(isolate(), target()->extra_ic_state());
   state.Update(left, right, result);
 
   // Check if we have a string operation here.
-  Handle<Code> target;
+  Handle<Code> new_target;
   if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
     // Setup the allocation site on-demand.
     if (allocation_site.is_null()) {
@@ -2594,24 +2632,24 @@
 
     // Install the stub with an allocation site.
     BinaryOpICWithAllocationSiteStub stub(isolate(), state);
-    target = stub.GetCodeCopyFromTemplate(allocation_site);
+    new_target = stub.GetCodeCopyFromTemplate(allocation_site);
 
     // Sanity check the trampoline stub.
-    DCHECK_EQ(*allocation_site, target->FindFirstAllocationSite());
+    DCHECK_EQ(*allocation_site, new_target->FindFirstAllocationSite());
   } else {
     // Install the generic stub.
     BinaryOpICStub stub(isolate(), state);
-    target = stub.GetCode();
+    new_target = stub.GetCode();
 
     // Sanity check the generic stub.
-    DCHECK_NULL(target->FindFirstAllocationSite());
+    DCHECK_NULL(new_target->FindFirstAllocationSite());
   }
-  set_target(*target);
+  set_target(*new_target);
 
   if (FLAG_trace_ic) {
     OFStream os(stdout);
     os << "[BinaryOpIC" << old_state << " => " << state << " @ "
-       << static_cast<void*>(*target) << " <- ";
+       << static_cast<void*>(*new_target) << " <- ";
     JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
     if (!allocation_site.is_null()) {
       os << " using allocation site " << static_cast<void*>(*allocation_site);
@@ -2739,7 +2777,7 @@
 
 
 Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
-  ToBooleanICStub stub(isolate(), target()->extra_ic_state());
+  ToBooleanICStub stub(isolate(), extra_ic_state());
   bool to_boolean_value = stub.UpdateStatus(object);
   Handle<Code> code = stub.GetCode();
   set_target(*code);
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 8bd2f44..5dae179 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -35,11 +35,11 @@
   // Compute the current IC state based on the target stub, receiver and name.
   void UpdateState(Handle<Object> receiver, Handle<Object> name);
 
-  bool IsNameCompatibleWithPrototypeFailure(Handle<Object> name);
-  void MarkPrototypeFailure(Handle<Object> name) {
-    DCHECK(IsNameCompatibleWithPrototypeFailure(name));
+  bool RecomputeHandlerForName(Handle<Object> name);
+  void MarkRecomputeHandler(Handle<Object> name) {
+    DCHECK(RecomputeHandlerForName(name));
     old_state_ = state_;
-    state_ = PROTOTYPE_FAILURE;
+    state_ = RECOMPUTE_HANDLER;
   }
 
   // Clear the inline cache to initial state.
@@ -47,14 +47,12 @@
 
 #ifdef DEBUG
   bool IsLoadStub() const {
-    return target()->is_load_stub() || target()->is_keyed_load_stub();
+    return kind_ == Code::LOAD_IC || kind_ == Code::KEYED_LOAD_IC;
   }
-
   bool IsStoreStub() const {
-    return target()->is_store_stub() || target()->is_keyed_store_stub();
+    return kind_ == Code::STORE_IC || kind_ == Code::KEYED_STORE_IC;
   }
-
-  bool IsCallStub() const { return target()->is_call_stub(); }
+  bool IsCallStub() const { return kind_ == Code::CALL_IC; }
 #endif
 
   static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
@@ -82,9 +80,6 @@
   }
 
  protected:
-  // Get the call-site target; used for determining the state.
-  Handle<Code> target() const { return target_; }
-
   Address fp() const { return fp_; }
   Address pc() const { return *pc_address_; }
   Isolate* isolate() const { return isolate_; }
@@ -101,13 +96,12 @@
 
   // Set the call-site target.
   inline void set_target(Code* code);
-  bool is_target_set() { return target_set_; }
   bool is_vector_set() { return vector_set_; }
 
   bool UseVector() const {
     bool use = ICUseVector(kind());
     // If we are supposed to use the nexus, verify the nexus is non-null.
-    DCHECK(!use || nexus_ != NULL);
+    DCHECK(!use || nexus_ != nullptr);
     return use;
   }
 
@@ -139,9 +133,6 @@
                                          Address constant_pool);
   static inline void SetTargetAtAddress(Address address, Code* target,
                                         Address constant_pool);
-  static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
-                                    State old_state, State new_state,
-                                    bool target_remains_ic_stub);
   // As a vector-based IC, type feedback must be updated differently.
   static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
   static void PostPatching(Address address, Code* target, Code* old_target);
@@ -149,6 +140,10 @@
   // Compute the handler either by compiling or by retrieving a cached version.
   Handle<Code> ComputeHandler(LookupIterator* lookup,
                               Handle<Object> value = Handle<Code>::null());
+  virtual Handle<Code> GetMapIndependentHandler(LookupIterator* lookup) {
+    UNREACHABLE();
+    return Handle<Code>::null();
+  }
   virtual Handle<Code> CompileHandler(LookupIterator* lookup,
                                       Handle<Object> value,
                                       CacheHolderFlag cache_holder) {
@@ -164,22 +159,18 @@
   bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
   void PatchCache(Handle<Name> name, Handle<Code> code);
   Code::Kind kind() const { return kind_; }
+  bool is_keyed() const {
+    return kind_ == Code::KEYED_LOAD_IC || kind_ == Code::KEYED_STORE_IC;
+  }
   Code::Kind handler_kind() const {
     if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
     DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
            kind_ == Code::KEYED_STORE_IC);
     return kind_;
   }
-  virtual Handle<Code> megamorphic_stub() {
-    UNREACHABLE();
-    return Handle<Code>::null();
-  }
-
-  bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
-                                              Handle<String> name);
+  bool ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name);
 
   ExtraICState extra_ic_state() const { return extra_ic_state_; }
-  void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; }
 
   Handle<Map> receiver_map() { return receiver_map_; }
   void update_receiver_map(Handle<Object> receiver) {
@@ -202,12 +193,10 @@
     return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
   }
 
-  inline void UpdateTarget();
-
   Handle<TypeFeedbackVector> vector() const { return nexus()->vector_handle(); }
   FeedbackVectorSlot slot() const { return nexus()->slot(); }
   State saved_state() const {
-    return state() == PROTOTYPE_FAILURE ? old_state_ : state();
+    return state() == RECOMPUTE_HANDLER ? old_state_ : state();
   }
 
   template <class NexusClass>
@@ -217,25 +206,17 @@
   FeedbackNexus* nexus() const { return nexus_; }
 
   inline Code* get_host();
+  inline Code* target() const;
 
  private:
-  inline Code* raw_target() const;
   inline Address constant_pool() const;
   inline Address raw_constant_pool() const;
 
   void FindTargetMaps() {
     if (target_maps_set_) return;
     target_maps_set_ = true;
-    if (UseVector()) {
-      nexus()->ExtractMaps(&target_maps_);
-    } else {
-      if (state_ == MONOMORPHIC) {
-        Map* map = target_->FindFirstMap();
-        if (map != NULL) target_maps_.Add(handle(map));
-      } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
-        target_->FindAllMaps(&target_maps_);
-      }
-    }
+    DCHECK(UseVector());
+    nexus()->ExtractMaps(&target_maps_);
   }
 
   // Frame pointer for the frame that uses (calls) the IC.
@@ -253,9 +234,6 @@
 
   Isolate* isolate_;
 
-  // The original code target that missed.
-  Handle<Code> target_;
-  bool target_set_;
   bool vector_set_;
   State old_state_;  // For saving if we marked as prototype failure.
   State state_;
@@ -283,9 +261,6 @@
   void HandleMiss(Handle<Object> function);
 
   // Code generator routines.
-  static Handle<Code> initialize_stub(Isolate* isolate, int argc,
-                                      ConvertReceiverMode mode,
-                                      TailCallMode tail_call_mode);
   static Handle<Code> initialize_stub_in_optimized_code(
       Isolate* isolate, int argc, ConvertReceiverMode mode,
       TailCallMode tail_call_mode);
@@ -312,13 +287,10 @@
 
   // Code generator routines.
 
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateRuntimeGetProperty(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
 
-  static Handle<Code> initialize_stub(Isolate* isolate,
-                                      ExtraICState extra_state);
   static Handle<Code> initialize_stub_in_optimized_code(
       Isolate* isolate, ExtraICState extra_state, State initialization_state);
 
@@ -328,32 +300,22 @@
   static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
 
  protected:
-  inline void set_target(Code* code);
-
   Handle<Code> slow_stub() const {
-    if (kind() == Code::LOAD_IC) {
-      return isolate()->builtins()->LoadIC_Slow();
-    } else {
-      DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
-      return isolate()->builtins()->KeyedLoadIC_Slow();
-    }
+    return isolate()->builtins()->LoadIC_Slow();
   }
 
-  Handle<Code> megamorphic_stub() override;
-
   // Update the inline cache and the global stub cache based on the
   // lookup result.
   void UpdateCaches(LookupIterator* lookup);
 
+  Handle<Code> GetMapIndependentHandler(LookupIterator* lookup) override;
+
   Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
                               CacheHolderFlag cache_holder) override;
 
  private:
   Handle<Code> SimpleFieldLoad(FieldIndex index);
 
-  static void Clear(Isolate* isolate, Address address, Code* target,
-                    Address constant_pool);
-
   friend class IC;
 };
 
@@ -364,7 +326,6 @@
               KeyedLoadICNexus* nexus = NULL)
       : LoadIC(depth, isolate, nexus) {
     DCHECK(nexus != NULL);
-    DCHECK(target()->is_keyed_load_stub());
   }
 
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
@@ -373,18 +334,8 @@
   // Code generator routines.
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMegamorphic(MacroAssembler* masm);
 
-  // Bit mask to be tested against bit field for the cases when
-  // generic stub should go into slow case.
-  // Access check is necessary explicitly since generic stub does not perform
-  // map checks.
-  static const int kSlowCaseBitFieldMask =
-      (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-
-  static Handle<Code> initialize_stub(Isolate* isolate,
-                                      ExtraICState extra_state);
   static Handle<Code> initialize_stub_in_optimized_code(
       Isolate* isolate, State initialization_state, ExtraICState extra_state);
   static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
@@ -394,22 +345,15 @@
 
  protected:
   // receiver is HeapObject because it could be a String or a JSObject
-  Handle<Code> LoadElementStub(Handle<HeapObject> receiver);
+  void UpdateLoadElement(Handle<HeapObject> receiver);
 
  private:
-  static void Clear(Isolate* isolate, Address address, Code* target,
-                    Address constant_pool);
-
   friend class IC;
 };
 
 
 class StoreIC : public IC {
  public:
-  static ExtraICState ComputeExtraICState(LanguageMode flag) {
-    return StoreICState(flag).GetExtraICState();
-  }
-
   StoreIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
       : IC(depth, isolate, nexus) {
     DCHECK(IsStoreStub());
@@ -421,19 +365,12 @@
 
   // Code generators for stub routines. Only called once at startup.
   static void GenerateSlow(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
-  }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
   static void GenerateRuntimeSetProperty(MacroAssembler* masm,
                                          LanguageMode language_mode);
 
-  static Handle<Code> initialize_stub(Isolate* isolate,
-                                      LanguageMode language_mode,
-                                      State initialization_state);
   static Handle<Code> initialize_stub_in_optimized_code(
       Isolate* isolate, LanguageMode language_mode, State initialization_state);
 
@@ -449,29 +386,17 @@
 
  protected:
   // Stub accessors.
-  Handle<Code> megamorphic_stub() override;
   Handle<Code> slow_stub() const;
 
-  virtual Handle<Code> pre_monomorphic_stub() const {
-    return pre_monomorphic_stub(isolate(), language_mode());
-  }
-
-  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
-                                           LanguageMode language_mode);
-
   // Update the inline cache and the global stub cache based on the
   // lookup result.
   void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
                     JSReceiver::StoreFromKeyed store_mode);
+  Handle<Code> GetMapIndependentHandler(LookupIterator* lookup) override;
   Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> value,
                               CacheHolderFlag cache_holder) override;
 
  private:
-  inline void set_target(Code* code);
-
-  static void Clear(Isolate* isolate, Address address, Code* target,
-                    Address constant_pool);
-
   friend class IC;
 };
 
@@ -484,50 +409,24 @@
 
 class KeyedStoreIC : public StoreIC {
  public:
-  // ExtraICState bits (building on IC)
-  // ExtraICState bits
-  // When more language modes are added, these BitFields need to move too.
-  STATIC_ASSERT(i::LANGUAGE_END == 3);
-  class ExtraICStateKeyedAccessStoreMode
-      : public BitField<KeyedAccessStoreMode, 3, 3> {};  // NOLINT
-
-  class IcCheckTypeField : public BitField<IcCheckType, 6, 1> {};
-
-  static ExtraICState ComputeExtraICState(LanguageMode flag,
-                                          KeyedAccessStoreMode mode) {
-    return StoreICState(flag).GetExtraICState() |
-           ExtraICStateKeyedAccessStoreMode::encode(mode) |
-           IcCheckTypeField::encode(ELEMENT);
-  }
-
   KeyedAccessStoreMode GetKeyedAccessStoreMode() {
     return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
   }
 
   KeyedStoreIC(FrameDepth depth, Isolate* isolate,
                KeyedStoreICNexus* nexus = NULL)
-      : StoreIC(depth, isolate, nexus) {
-    DCHECK(target()->is_keyed_store_stub());
-  }
+      : StoreIC(depth, isolate, nexus) {}
 
   MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
                                             Handle<Object> name,
                                             Handle<Object> value);
 
   // Code generators for stub routines.  Only called once at startup.
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
-  }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateSlow(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm,
                                   LanguageMode language_mode);
 
-  static Handle<Code> initialize_stub(Isolate* isolate,
-                                      LanguageMode language_mode,
-                                      State initialization_state);
-
   static Handle<Code> initialize_stub_in_optimized_code(
       Isolate* isolate, LanguageMode language_mode, State initialization_state);
   static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
@@ -536,27 +435,10 @@
   static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
 
  protected:
-  virtual Handle<Code> pre_monomorphic_stub() const {
-    return pre_monomorphic_stub(isolate(), language_mode());
-  }
-  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
-                                           LanguageMode language_mode) {
-    if (is_strict(language_mode)) {
-      return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
-    } else {
-      return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
-    }
-  }
-
-  Handle<Code> StoreElementStub(Handle<Map> receiver_map,
-                                KeyedAccessStoreMode store_mode);
+  void UpdateStoreElement(Handle<Map> receiver_map,
+                          KeyedAccessStoreMode store_mode);
 
  private:
-  inline void set_target(Code* code);
-
-  static void Clear(Isolate* isolate, Address address, Code* target,
-                    Address constant_pool);
-
   Handle<Map> ComputeTransitionedMap(Handle<Map> map,
                                      KeyedAccessStoreMode store_mode);
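
Taken together, the ic.h changes above apply one pattern to LoadIC, KeyedLoadIC, StoreIC and KeyedStoreIC: the self-patching machinery (initialize_stub, pre_monomorphic_stub, megamorphic_stub, set_target and the per-IC Clear overloads) is removed, handler selection is routed through the new virtual GetMapIndependentHandler, and element handlers are installed via UpdateLoadElement/UpdateStoreElement instead of being returned as compiled stubs. A rough sketch of the resulting selection flow; only GetMapIndependentHandler and CompileHandler are taken from this diff, while the wrapper and the cache-holder value are illustrative guesses:

    Handle<Code> IC::ComputeHandler(LookupIterator* lookup,
                                    Handle<Object> value) {
      // Prefer a handler that is valid regardless of the receiver map...
      Handle<Code> handler = GetMapIndependentHandler(lookup);
      if (!handler.is_null()) return handler;
      // ...and only compile a map-specific handler when that fails.
      return CompileHandler(lookup, value, kCacheOnReceiver);
    }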
 
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index b924bda..847782e 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -584,66 +584,6 @@
 }
 
 
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  // Here and below +1 is for name() pushed after the args_ array.
-  typedef PropertyCallbackArguments PCA;
-  __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
-  __ sw(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ li(scratch2(), data);
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-  }
-  __ sw(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
-  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
-  __ sw(scratch2(),
-        MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
-  __ sw(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
-                                       kPointerSize));
-  __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  __ sw(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
-  __ sw(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
-  // should_throw_on_error -> false
-  DCHECK(Smi::FromInt(0) == nullptr);
-  __ sw(zero_reg,
-        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
-
-  __ sw(name(), MemOperand(sp, 0 * kPointerSize));
-
-  // Abi for CallApiGetter.
-  Register getter_address_reg = ApiGetterDescriptor::function_address();
-
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ li(getter_address_reg, Operand(ref));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -737,7 +677,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -778,7 +718,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
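
Every GenerateLoadCallback body deleted in this patch (the mips copy above, plus the mips64, ppc, s390, x64 and x87 copies below) hand-built the v8::PropertyCallbackInfo::args_ array on the stack before tail-calling CallApiGetterStub. The stub now builds that frame itself, so a handler only has to populate the three registers named by the reworked ApiGetterDescriptor (see the interface-descriptors changes near the end of this patch). A hedged sketch of the new convention; holder_reg and callback_cell stand in for whatever registers the handler has its operands in:

    // Receiver, holder and the AccessorInfo cell travel in fixed registers;
    // building the PropertyCallbackInfo frame is the stub's job now.
    __ Move(ApiGetterDescriptor::ReceiverRegister(), receiver());
    __ Move(ApiGetterDescriptor::HolderRegister(), holder_reg);
    __ Move(ApiGetterDescriptor::CallbackRegister(), callback_cell);
    CallApiGetterStub stub(isolate());
    __ TailCallStub(&stub);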
 
 
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index ae3615e..6c44918 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -419,9 +419,9 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ li(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, t0, t1, t2, t5);
   // Cache miss.
   GenerateMiss(masm);
@@ -616,11 +616,10 @@
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
   __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(t0, t0,
-         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
   __ Branch(&slow, ne, t0, Operand(zero_reg));
   // Check if the object is a JS array or not.
   __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -663,10 +662,10 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ li(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, t1, t2, t4, t5);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, t1, t2, t4, t5);
   // Cache miss.
   __ Branch(&miss);
 
@@ -737,10 +736,10 @@
   DCHECK(StoreDescriptor::ValueRegister().is(a0));
 
   // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, a3, t0, t1, t2);
+                                               receiver, name, t1, t2, t3, t4);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
diff --git a/src/ic/mips/stub-cache-mips.cc b/src/ic/mips/stub-cache-mips.cc
index 039763c..ec9f043 100644
--- a/src/ic/mips/stub-cache-mips.cc
+++ b/src/ic/mips/stub-cache-mips.cc
@@ -14,10 +14,9 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Kind ic_kind, Code::Flags flags,
-                       StubCache::Table table, Register receiver, Register name,
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
                        Register offset_scratch) {
@@ -97,9 +96,6 @@
   // entry size being 12.
   DCHECK(sizeof(Entry) == 12);
 
-  // Make sure the flags does not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Make sure that there are no register conflicts.
   DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
@@ -145,8 +141,8 @@
   __ And(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ srl(at, name, kCacheIndexShift);
@@ -156,8 +152,8 @@
   __ And(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
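
ProbeTable loses its ic_kind parameter here (and in every other stub-cache file in this patch) because Code::StubType is gone: the probe flags, already stripped of the cache-holder bits by RemoveHolderFromFlags, can be checked against a candidate entry directly. The heart of that check is roughly the following, in MIPS flavor; `code` stands for the register holding the candidate Code object, and the mask is assumed to be Code::kFlagsNotUsedInLookup:

    // Compare the candidate's flags, minus the bits that never participate
    // in a stub-cache lookup, against the probe flags; mismatch => next probe.
    __ lw(at, FieldMemOperand(code, Code::kFlagsOffset));
    __ And(at, at, Operand(~Code::kFlagsNotUsedInLookup));
    __ Branch(&miss, ne, at, Operand(flags));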
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 52260ee..81a9b3f 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -584,66 +584,6 @@
 }
 
 
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  // Here and below +1 is for name() pushed after the args_ array.
-  typedef PropertyCallbackArguments PCA;
-  __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
-  __ sd(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ li(scratch2(), data);
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-  }
-  __ sd(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
-  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
-  __ sd(scratch2(),
-        MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
-  __ sd(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
-                                       kPointerSize));
-  __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  __ sd(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
-  __ sd(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
-  // should_throw_on_error -> false
-  DCHECK(Smi::FromInt(0) == nullptr);
-  __ sd(zero_reg,
-        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
-
-  __ sd(name(), MemOperand(sp, 0 * kPointerSize));
-
-  // Abi for CallApiGetter.
-  Register getter_address_reg = ApiGetterDescriptor::function_address();
-
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ li(getter_address_reg, Operand(ref));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -737,7 +677,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -778,7 +718,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index f46c9dc..5193c85 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -418,9 +418,9 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ li(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, a4, a5, a6, t1);
   // Cache miss.
   GenerateMiss(masm);
@@ -622,11 +622,10 @@
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
   __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(a4, a4,
-         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
   __ Branch(&slow, ne, a4, Operand(zero_reg));
   // Check if the object is a JS array or not.
   __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -667,10 +666,10 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ li(slot, Operand(Smi::FromInt(slot_index)));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, a5, a6, a7, t0);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, a5, a6, a7, t0);
   // Cache miss.
   __ Branch(&miss);
 
@@ -741,10 +740,10 @@
   DCHECK(StoreDescriptor::ValueRegister().is(a0));
 
   // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, a3, a4, a5, a6);
+                                               receiver, name, a5, a6, a7, t0);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
diff --git a/src/ic/mips64/stub-cache-mips64.cc b/src/ic/mips64/stub-cache-mips64.cc
index 0bd7dd0..d000c52 100644
--- a/src/ic/mips64/stub-cache-mips64.cc
+++ b/src/ic/mips64/stub-cache-mips64.cc
@@ -14,10 +14,9 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Kind ic_kind, Code::Flags flags,
-                       StubCache::Table table, Register receiver, Register name,
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
                        Register offset_scratch) {
@@ -100,9 +99,6 @@
   // DCHECK(sizeof(Entry) == 12);
   // DCHECK(sizeof(Entry) == 3 * kPointerSize);
 
-  // Make sure the flags does not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Make sure that there are no register conflicts.
   DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
@@ -148,8 +144,8 @@
   __ And(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ dsrl(at, name, kCacheIndexShift);
@@ -159,8 +155,8 @@
   __ And(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index 832c25a..49af112 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -592,56 +592,6 @@
 }
 
 
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  __ push(receiver());
-  // Push data from AccessorInfo.
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ Move(scratch2(), data);
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-  }
-  __ push(scratch2());
-  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
-  __ Push(scratch2(), scratch2());
-  __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  // should_throw_on_error -> false
-  __ mov(scratch3(), Operand(Smi::FromInt(0)));
-  __ Push(scratch2(), reg, scratch3(), name());
-
-  // Abi for CallApiGetter
-  Register getter_address_reg = ApiGetterDescriptor::function_address();
-
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ mov(getter_address_reg, Operand(ref));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -737,7 +687,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -777,7 +727,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 
diff --git a/src/ic/ppc/ic-ppc.cc b/src/ic/ppc/ic-ppc.cc
index 567296c..3c86786 100644
--- a/src/ic/ppc/ic-ppc.cc
+++ b/src/ic/ppc/ic-ppc.cc
@@ -425,8 +425,8 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, r7, r8, r9, r10);
   // Cache miss.
@@ -639,11 +639,10 @@
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
   __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ andi(r0, ip,
-          Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ andi(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ bne(&slow, cr0);
   // Check if the object is a JS array or not.
   __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -686,10 +685,10 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, r8, r9, r10, r11);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, r8, r9, r10, r11);
   // Cache miss.
   __ b(&miss);
 
@@ -747,11 +746,11 @@
   DCHECK(StoreDescriptor::ValueRegister().is(r3));
 
   // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
 
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, r6, r7, r8, r9);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, receiver, name, r8, r9, r10, r11);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
diff --git a/src/ic/ppc/stub-cache-ppc.cc b/src/ic/ppc/stub-cache-ppc.cc
index 6030b2c..45a620c 100644
--- a/src/ic/ppc/stub-cache-ppc.cc
+++ b/src/ic/ppc/stub-cache-ppc.cc
@@ -14,10 +14,9 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Kind ic_kind, Code::Flags flags,
-                       StubCache::Table table, Register receiver, Register name,
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
                        Register offset_scratch) {
@@ -120,9 +119,6 @@
   DCHECK(sizeof(Entry) == 12);
 #endif
 
-  // Make sure the flags does not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Make sure that there are no register conflicts.
   DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
@@ -166,8 +162,8 @@
           Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ sub(scratch, scratch, name);
@@ -176,8 +172,8 @@
           Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
diff --git a/src/ic/s390/handler-compiler-s390.cc b/src/ic/s390/handler-compiler-s390.cc
index 1b39782..f15a04d 100644
--- a/src/ic/s390/handler-compiler-s390.cc
+++ b/src/ic/s390/handler-compiler-s390.cc
@@ -562,55 +562,6 @@
   __ Ret();
 }
 
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  __ Push(receiver());
-  // Push data from AccessorInfo.
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ Move(scratch2(), data);
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-  }
-  __ push(scratch2());
-  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
-  __ Push(scratch2(), scratch2());
-  __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  // should_throw_on_error -> false
-  __ mov(scratch3(), Operand(Smi::FromInt(0)));
-  __ Push(scratch2(), reg, scratch3(), name());
-
-  // Abi for CallApiGetter
-  Register getter_address_reg = ApiGetterDescriptor::function_address();
-
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ mov(getter_address_reg, Operand(ref));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -703,7 +654,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 Register NamedStoreHandlerCompiler::value() {
@@ -740,7 +691,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 #undef __
diff --git a/src/ic/s390/ic-s390.cc b/src/ic/s390/ic-s390.cc
index d4f2886..bf9f8a1 100644
--- a/src/ic/s390/ic-s390.cc
+++ b/src/ic/s390/ic-s390.cc
@@ -412,8 +412,8 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, r6, r7, r8, r9);
   // Cache miss.
@@ -625,11 +625,10 @@
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
   __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ AndP(r0, ip,
-          Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ bne(&slow, Label::kNear);
   // Check if the object is a JS array or not.
   __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -671,10 +670,10 @@
   __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
   __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, r7, r8, r9, ip);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, r7, r8, r9, ip);
   // Cache miss.
   __ b(&miss);
 
@@ -728,11 +727,11 @@
   DCHECK(StoreDescriptor::ValueRegister().is(r2));
 
   // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
 
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, name, r5, r6, r7, r8);
+                                               receiver, name, r7, r8, r9, ip);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
diff --git a/src/ic/s390/stub-cache-s390.cc b/src/ic/s390/stub-cache-s390.cc
index 054b946..cd50e56 100644
--- a/src/ic/s390/stub-cache-s390.cc
+++ b/src/ic/s390/stub-cache-s390.cc
@@ -15,8 +15,8 @@
 #define __ ACCESS_MASM(masm)
 
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Kind ic_kind, Code::Flags flags,
-                       StubCache::Table table, Register receiver, Register name,
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
                        Register offset_scratch) {
@@ -114,9 +114,6 @@
   DCHECK(sizeof(Entry) == 12);
 #endif
 
-  // Make sure the flags does not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Make sure that there are no register conflicts.
   DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
@@ -160,8 +157,8 @@
           Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ SubP(scratch, scratch, name);
@@ -170,8 +167,8 @@
           Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
-             extra, extra2, extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+             extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc
index 4a5f9bd..5d71c1f 100644
--- a/src/ic/stub-cache.cc
+++ b/src/ic/stub-cache.cc
@@ -23,7 +23,7 @@
 
 static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
                                          Code::Flags flags) {
-  flags = Code::RemoveTypeAndHolderFromFlags(flags);
+  flags = Code::RemoveHolderFromFlags(flags);
 
   // Validate that the name does not move on scavenge, and that we
   // can use identity checks instead of structural equality checks.
@@ -36,8 +36,7 @@
   DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
   STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
 
-  // Make sure that the code type and cache holder are not included in the hash.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+  // Make sure that the cache holder is not included in the hash.
   DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
 
   return flags;
@@ -56,8 +55,7 @@
   // secondary cache before overwriting it.
   if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
     Map* old_map = primary->map;
-    Code::Flags old_flags =
-        Code::RemoveTypeAndHolderFromFlags(old_code->flags());
+    Code::Flags old_flags = Code::RemoveHolderFromFlags(old_code->flags());
     int seed = PrimaryOffset(primary->key, old_flags, old_map);
     int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
     Entry* secondary = entry(secondary_, secondary_offset);
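
src/ic/stub-cache.cc is the platform-independent side of the same cleanup: CommonStubCacheChecks now normalizes flags with RemoveHolderFromFlags alone (there is no type field left to strip), and the eviction path above re-hashes the old primary entry under the same normalization before moving it to the secondary table. For orientation, the lookup side reduces to roughly this simplified sketch of StubCache::Get, reusing the helpers visible above:

    Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
      flags = CommonStubCacheChecks(name, map, flags);
      int primary_offset = PrimaryOffset(name, flags, map);
      Entry* primary = entry(primary_, primary_offset);
      if (primary->key == name && primary->map == map) return primary->value;
      int secondary_offset = SecondaryOffset(name, flags, primary_offset);
      Entry* secondary = entry(secondary_, secondary_offset);
      if (secondary->key == name && secondary->map == map) {
        return secondary->value;
      }
      return nullptr;
    }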
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index dde6169..21d96ea 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -597,58 +597,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), receiver()));
-  DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), reg));
-
-  // Insert additional parameters into the stack frame above return address.
-  __ PopReturnAddressTo(scratch3());
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  __ Push(receiver());  // receiver
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ Push(data);
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-    __ Push(scratch2());
-  }
-  __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
-  __ Push(kScratchRegister);  // return value
-  __ Push(kScratchRegister);  // return value default
-  __ PushAddress(ExternalReference::isolate_address(isolate()));
-  __ Push(reg);     // holder
-  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
-
-  __ Push(name());  // name
-  __ PushReturnAddressFrom(scratch3());
-
-  // Abi for CallApiGetter
-  Register api_function_address = ApiGetterDescriptor::function_address();
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
   // Return the constant value.
   __ Move(rax, value);
@@ -756,7 +704,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -798,7 +746,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index 247116d..4e9927d 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -340,8 +340,8 @@
   __ Move(vector, dummy_vector);
   __ Move(slot, Smi::FromInt(slot_index));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key,
                                                megamorphic_scratch, no_reg);
@@ -519,10 +519,10 @@
   __ JumpIfSmi(receiver, &slow_with_tagged_index);
   // Get the map from the receiver.
   __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ testb(FieldOperand(r9, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+           Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow_with_tagged_index);
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &maybe_name_key);
@@ -567,10 +567,10 @@
   __ Move(vector, dummy_vector);
   __ Move(slot, Smi::FromInt(slot_index));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, r9, no_reg);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, r9, no_reg);
   // Cache miss.
   __ jmp(&miss);
 
diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc
index 9a9dfe9..a65165b 100644
--- a/src/ic/x64/stub-cache-x64.cc
+++ b/src/ic/x64/stub-cache-x64.cc
@@ -14,10 +14,9 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Kind ic_kind, Code::Flags flags,
-                       StubCache::Table table, Register receiver, Register name,
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name,
                        // The offset is scaled by 4, based on
                        // kCacheIndexShift, which is two bits
                        Register offset) {
@@ -93,9 +92,6 @@
   // entry size being 3 * kPointerSize.
   DCHECK(sizeof(Entry) == 3 * kPointerSize);
 
-  // Make sure the flags do not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Make sure that there are no register conflicts.
   DCHECK(!scratch.is(receiver));
   DCHECK(!scratch.is(name));
@@ -139,7 +135,7 @@
   __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch);
+  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
 
   // Primary miss: Compute hash for secondary probe.
   __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
@@ -151,8 +147,7 @@
   __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name,
-             scratch);
+  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index 281faba..7983273 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -594,58 +594,6 @@
 }
 
 
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<AccessorInfo> callback) {
-  DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
-  DCHECK(!AreAliased(scratch2(), scratch3(), reg));
-
-  // Insert additional parameters into the stack frame above return address.
-  __ pop(scratch3());  // Get return address to place it below.
-
-  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
-  // name below the exit frame to make GC aware of them.
-  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
-  __ push(receiver());  // receiver
-  // Push data from AccessorInfo.
-  Handle<Object> data(callback->data(), isolate());
-  if (data->IsUndefined() || data->IsSmi()) {
-    __ push(Immediate(data));
-  } else {
-    Handle<WeakCell> cell =
-        isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
-    // The callback is alive if this instruction is executed,
-    // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch2(), cell);
-    __ push(scratch2());
-  }
-  __ push(Immediate(isolate()->factory()->undefined_value()));  // ReturnValue
-  // ReturnValue default value
-  __ push(Immediate(isolate()->factory()->undefined_value()));
-  __ push(Immediate(reinterpret_cast<int>(isolate())));
-  __ push(reg);  // holder
-  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
-
-  __ push(name());  // name
-  __ push(scratch3());  // Restore return address.
-
-  // Abi for CallApiGetter
-  Register getter_address = ApiGetterDescriptor::function_address();
-  Address function_address = v8::ToCData<Address>(callback->getter());
-  __ mov(getter_address, Immediate(function_address));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
   // Return the constant value.
   __ LoadObject(eax, value);
@@ -759,7 +707,7 @@
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
 
   // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
+  return GetCode(kind(), name);
 }
 
 
@@ -801,7 +749,7 @@
   FrontendFooter(name, &miss);
 
   // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
+  return GetCode(kind(), name);
 }
 
 
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index b51045b..9491954 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -336,8 +336,8 @@
   __ push(Immediate(Smi::FromInt(slot)));
   __ push(Immediate(dummy_vector));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                                receiver, key, ebx, edi);
 
@@ -519,10 +519,10 @@
   __ JumpIfSmi(receiver, &slow);
   // Get the map from the receiver.
   __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
+  // Check that the receiver does not require access checks.
+  // The generic stub does not perform map checks.
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow);
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &maybe_name_key);
@@ -563,10 +563,10 @@
   __ push(Immediate(Smi::FromInt(slot)));
   __ push(Immediate(dummy_vector));
 
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, edi, no_reg);
+  Code::Flags flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::KEYED_STORE_IC, flags, receiver, key, edi, no_reg);
 
   __ pop(VectorStoreICDescriptor::VectorRegister());
   __ pop(VectorStoreICDescriptor::SlotRegister());
diff --git a/src/ic/x87/stub-cache-x87.cc b/src/ic/x87/stub-cache-x87.cc
index dfc0ef6..b238e5f 100644
--- a/src/ic/x87/stub-cache-x87.cc
+++ b/src/ic/x87/stub-cache-x87.cc
@@ -153,9 +153,6 @@
   // being 12.
   DCHECK(sizeof(Entry) == 12);
 
-  // Assert the flags do not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
   // Assert that there are no register conflicts.
   DCHECK(!scratch.is(receiver));
   DCHECK(!scratch.is(name));
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index 9ee4269..860ad2a 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -75,12 +75,6 @@
 }
 
 
-void AllocateMutableHeapNumberDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  data->InitializePlatformSpecific(0, nullptr, nullptr);
-}
-
-
 void VoidDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   data->InitializePlatformSpecific(0, nullptr);
@@ -184,13 +178,6 @@
 }
 
 
-void InstanceOfDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {LeftRegister(), RightRegister()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void StringCompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {LeftRegister(), RightRegister()};
@@ -203,6 +190,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void HasPropertyDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {KeyRegister(), ObjectRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void MathPowTaggedDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -301,23 +293,17 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-FunctionType* ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int paramater_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
-  function->InitParameter(0, ExternalPointer(zone));
-  return function;
+const Register ApiGetterDescriptor::ReceiverRegister() {
+  return LoadDescriptor::ReceiverRegister();
 }
 
-
 void ApiGetterDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {function_address()};
+  Register registers[] = {ReceiverRegister(), HolderRegister(),
+                          CallbackRegister()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void ContextOnlyDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   data->InitializePlatformSpecific(0, nullptr);
@@ -446,6 +432,19 @@
 }
 
 FunctionType*
+ArrayNoArgumentConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+    Isolate* isolate, int parameter_count) {
+  Zone* zone = isolate->interface_descriptor_zone();
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
+  function->InitParameter(0, Type::Receiver());  // JSFunction
+  function->InitParameter(1, AnyTagged(zone));
+  function->InitParameter(2, UntaggedIntegral32(zone));
+  function->InitParameter(3, AnyTagged(zone));
+  return function;
+}
+
+FunctionType*
 ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int parameter_count) {
   Zone* zone = isolate->interface_descriptor_zone();
@@ -528,9 +527,8 @@
     Isolate* isolate, int parameter_count) {
   Zone* zone = isolate->interface_descriptor_zone();
   FunctionType* function =
-      Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(kAccumulatorParameter, AnyTagged(zone));
-  function->InitParameter(kRegisterFileParameter, ExternalPointer(zone));
   function->InitParameter(kBytecodeOffsetParameter, UntaggedIntegral32(zone));
   function->InitParameter(kBytecodeArrayParameter, AnyTagged(zone));
   function->InitParameter(kDispatchTableParameter, AnyTagged(zone));
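
HasPropertyDescriptor is new in this patch: the platform-independent part above only fixes the argument order (key, then object), while KeyRegister() and ObjectRegister() are pinned to concrete registers in each architecture's descriptor file. A hedged sketch of how a hand-written stub bound to this descriptor would fetch its inputs and defer to the runtime; the stub name and the runtime argument order are illustrative, not taken from this patch:

    void HasPropertyStub::Generate(MacroAssembler* masm) {
      Register key = HasPropertyDescriptor::KeyRegister();
      Register object = HasPropertyDescriptor::ObjectRegister();
      // Delegate to the runtime implementation of the 'in' operator.
      __ Push(object);
      __ Push(key);
      __ TailCallRuntime(Runtime::kHasProperty);
    }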
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index dcce0af..60d8723 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -21,7 +21,6 @@
   V(VectorStoreTransition)                    \
   V(VectorStoreICTrampoline)                  \
   V(VectorStoreIC)                            \
-  V(InstanceOf)                               \
   V(LoadWithVector)                           \
   V(FastArrayPush)                            \
   V(FastNewClosure)                           \
@@ -47,7 +46,6 @@
   V(RegExpConstructResult)                    \
   V(TransitionElementsKind)                   \
   V(AllocateHeapNumber)                       \
-  V(AllocateMutableHeapNumber)                \
   V(AllocateFloat32x4)                        \
   V(AllocateInt32x4)                          \
   V(AllocateUint32x4)                         \
@@ -58,7 +56,7 @@
   V(AllocateInt8x16)                          \
   V(AllocateUint8x16)                         \
   V(AllocateBool8x16)                         \
-  V(AllocateInNewSpace)                       \
+  V(ArrayNoArgumentConstructor)               \
   V(ArrayConstructorConstantArgCount)         \
   V(ArrayConstructor)                         \
   V(InternalArrayConstructorConstantArgCount) \
@@ -66,10 +64,12 @@
   V(Compare)                                  \
   V(BinaryOp)                                 \
   V(BinaryOpWithAllocationSite)               \
+  V(CountOp)                                  \
   V(StringAdd)                                \
   V(StringCompare)                            \
   V(Keyed)                                    \
   V(Named)                                    \
+  V(HasProperty)                              \
   V(CallHandler)                              \
   V(ArgumentAdaptor)                          \
   V(ApiCallbackWith0Args)                     \
@@ -90,7 +90,8 @@
   V(InterpreterDispatch)                      \
   V(InterpreterPushArgsAndCall)               \
   V(InterpreterPushArgsAndConstruct)          \
-  V(InterpreterCEntry)
+  V(InterpreterCEntry)                        \
+  V(ResumeGenerator)
 
 class CallInterfaceDescriptorData {
  public:
@@ -334,16 +335,6 @@
 };
 
 
-class InstanceOfDescriptor final : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(InstanceOfDescriptor, CallInterfaceDescriptor)
-
-  enum ParameterIndices { kLeftIndex, kRightIndex, kParameterCount };
-  static const Register LeftRegister();
-  static const Register RightRegister();
-};
-
-
 class VectorStoreICTrampolineDescriptor : public StoreDescriptor {
  public:
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
@@ -430,6 +421,15 @@
   static const Register ArgumentRegister();
 };
 
+class HasPropertyDescriptor final : public CallInterfaceDescriptor {
+ public:
+  enum ParameterIndices { kKeyIndex, kObjectIndex };
+
+  DECLARE_DESCRIPTOR(HasPropertyDescriptor, CallInterfaceDescriptor)
+
+  static const Register KeyRegister();
+  static const Register ObjectRegister();
+};
 
 class TypeofDescriptor : public CallInterfaceDescriptor {
  public:
@@ -570,19 +570,18 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
+class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(AllocateMutableHeapNumberDescriptor,
-                     CallInterfaceDescriptor)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+      ArrayNoArgumentConstructorDescriptor, CallInterfaceDescriptor)
+  enum ParameterIndices {
+    kFunctionIndex,
+    kAllocationSiteIndex,
+    kArgumentCountIndex,
+    kContextIndex
+  };
 };
 
-
-class AllocateInNewSpaceDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(AllocateInNewSpaceDescriptor, CallInterfaceDescriptor)
-};
-
-
 class ArrayConstructorConstantArgCountDescriptor
     : public CallInterfaceDescriptor {
  public:
@@ -631,6 +630,10 @@
                      CallInterfaceDescriptor)
 };
 
+class CountOpDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(CountOpDescriptor, CallInterfaceDescriptor)
+};
 
 class StringAddDescriptor : public CallInterfaceDescriptor {
  public:
@@ -746,10 +749,11 @@
 
 class ApiGetterDescriptor : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiGetterDescriptor,
-                                               CallInterfaceDescriptor)
+  DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
 
-  static const Register function_address();
+  static const Register ReceiverRegister();
+  static const Register HolderRegister();
+  static const Register CallbackRegister();
 };
 
 
@@ -795,11 +799,9 @@
                                                CallInterfaceDescriptor)
 
   static const int kAccumulatorParameter = 0;
-  static const int kRegisterFileParameter = 1;
-  static const int kBytecodeOffsetParameter = 2;
-  static const int kBytecodeArrayParameter = 3;
-  static const int kDispatchTableParameter = 4;
-  static const int kContextParameter = 5;
+  static const int kBytecodeOffsetParameter = 1;
+  static const int kBytecodeArrayParameter = 2;
+  static const int kDispatchTableParameter = 3;
 };
 
 class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
@@ -822,6 +824,11 @@
   DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
 };
 
+class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
+};
+
 #undef DECLARE_DESCRIPTOR_WITH_BASE
 #undef DECLARE_DESCRIPTOR
 #undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
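
A minimal usage sketch for the new HasPropertyDescriptor;
LoadHasPropertyInputs is a hypothetical helper, and the concrete register
assignments live in the per-architecture interface-descriptors files:

  // Hypothetical consumer of the descriptor's fixed registers.
  void LoadHasPropertyInputs(Register* key, Register* object) {
    *key = HasPropertyDescriptor::KeyRegister();        // kKeyIndex
    *object = HasPropertyDescriptor::ObjectRegister();  // kObjectIndex
  }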
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 109b01e..75bf631 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -3,117 +3,43 @@
 // found in the LICENSE file.
 
 #include "src/interpreter/bytecode-array-builder.h"
+
 #include "src/compiler.h"
+#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-peephole-optimizer.h"
 #include "src/interpreter/interpreter-intrinsics.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
-class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
- public:
-  explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
-      : array_builder_(array_builder),
-        previous_bytecode_start_(array_builder_.last_bytecode_start_) {
-    // This helper is expected to be instantiated only when the last bytecode is
-    // in the same basic block.
-    DCHECK(array_builder_.LastBytecodeInSameBlock());
-    bytecode_ = Bytecodes::FromByte(
-        array_builder_.bytecodes()->at(previous_bytecode_start_));
-    operand_scale_ = OperandScale::kSingle;
-    if (Bytecodes::IsPrefixScalingBytecode(bytecode_)) {
-      operand_scale_ = Bytecodes::PrefixBytecodeToOperandScale(bytecode_);
-      bytecode_ = Bytecodes::FromByte(
-          array_builder_.bytecodes()->at(previous_bytecode_start_ + 1));
-    }
-  }
-
-  // Returns the previous bytecode in the same basic block.
-  MUST_USE_RESULT Bytecode GetBytecode() const {
-    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
-    return bytecode_;
-  }
-
-  MUST_USE_RESULT Register GetRegisterOperand(int operand_index) const {
-    return Register::FromOperand(GetSignedOperand(operand_index));
-  }
-
-  MUST_USE_RESULT uint32_t GetIndexOperand(int operand_index) const {
-    return GetUnsignedOperand(operand_index);
-  }
-
-  Handle<Object> GetConstantForIndexOperand(int operand_index) const {
-    return array_builder_.constant_array_builder()->At(
-        GetIndexOperand(operand_index));
-  }
-
- private:
-  // Returns the signed operand at operand_index for the previous
-  // bytecode in the same basic block.
-  MUST_USE_RESULT int32_t GetSignedOperand(int operand_index) const {
-    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
-    OperandType operand_type =
-        Bytecodes::GetOperandType(bytecode_, operand_index);
-    DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
-    const uint8_t* operand_start = GetOperandStart(operand_index);
-    return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
-                                          operand_scale_);
-  }
-
-  // Returns the unsigned operand at operand_index for the previous
-  // bytecode in the same basic block.
-  MUST_USE_RESULT uint32_t GetUnsignedOperand(int operand_index) const {
-    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
-    OperandType operand_type =
-        Bytecodes::GetOperandType(bytecode_, operand_index);
-    DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
-    const uint8_t* operand_start = GetOperandStart(operand_index);
-    return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
-                                            operand_scale_);
-  }
-
-  const uint8_t* GetOperandStart(int operand_index) const {
-    size_t operand_offset =
-        previous_bytecode_start_ + prefix_offset() +
-        Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale_);
-    return &(*array_builder_.bytecodes())[0] + operand_offset;
-  }
-
-  int prefix_offset() const {
-    return Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_) ? 1
-                                                                         : 0;
-  }
-
-  const BytecodeArrayBuilder& array_builder_;
-  OperandScale operand_scale_;
-  Bytecode bytecode_;
-  size_t previous_bytecode_start_;
-
-  DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
-};
-
 BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
                                            int parameter_count,
                                            int context_count, int locals_count,
                                            FunctionLiteral* literal)
     : isolate_(isolate),
       zone_(zone),
-      bytecodes_(zone),
       bytecode_generated_(false),
       constant_array_builder_(isolate, zone),
       handler_table_builder_(isolate, zone),
       source_position_table_builder_(isolate, zone),
-      last_block_end_(0),
-      last_bytecode_start_(~0),
       exit_seen_in_block_(false),
       unbound_jumps_(0),
       parameter_count_(parameter_count),
       local_register_count_(locals_count),
       context_register_count_(context_count),
-      temporary_allocator_(zone, fixed_register_count()) {
+      temporary_allocator_(zone, fixed_register_count()),
+      bytecode_array_writer_(zone, &source_position_table_builder_),
+      pipeline_(&bytecode_array_writer_) {
   DCHECK_GE(parameter_count_, 0);
   DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
+
+  if (FLAG_ignition_peephole) {
+    pipeline_ = new (zone)
+        BytecodePeepholeOptimizer(&constant_array_builder_, pipeline_);
+  }
+
   return_position_ =
       literal ? std::max(literal->start_position(), literal->end_position() - 1)
               : RelocInfo::kNoPosition;
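
The constructor above wires up the new emission pipeline: the builder
always terminates in a BytecodeArrayWriter, and under --ignition-peephole
a BytecodePeepholeOptimizer is spliced in front of it. A sketch of the
stage interface the builder calls through (the real declaration lives in
src/interpreter/bytecode-pipeline.h):

  class BytecodePipelineStage {
   public:
    virtual ~BytecodePipelineStage() {}
    // Accepts a node; a stage may buffer it, e.g. the peephole stage
    // holds the last node so it can fuse it with the next one.
    virtual void Write(BytecodeNode* node) = 0;
    // Flushes buffered nodes and returns the current bytecode offset;
    // needed before binding labels or recording handler offsets.
    virtual size_t FlushForOffset() = 0;
    // Flushes at a basic-block boundary so no assumption about the
    // previously written bytecode leaks across blocks.
    virtual void FlushBasicBlock() = 0;
  };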
@@ -121,8 +47,6 @@
                                source_position_table_builder()));
 }
 
-BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
-
 Register BytecodeArrayBuilder::first_context_register() const {
   DCHECK_GT(context_register_count_, 0);
   return Register(local_register_count_);
@@ -147,18 +71,26 @@
 
 
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
+  DCHECK_EQ(0, unbound_jumps_);
   DCHECK_EQ(bytecode_generated_, false);
   DCHECK(exit_seen_in_block_);
 
-  int bytecode_size = static_cast<int>(bytecodes_.size());
-  int register_count = fixed_and_temporary_register_count();
-  int frame_size = register_count * kPointerSize;
+  pipeline()->FlushBasicBlock();
+  const ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
+
+  int bytecode_size = static_cast<int>(bytecodes->size());
+
+  // All locals need a frame slot for the debugger, even if they are
+  // never referenced in the generated bytecode.
+  int frame_size_for_locals = fixed_register_count() * kPointerSize;
+  int frame_size_used = bytecode_array_writer()->GetMaximumFrameSizeUsed();
+  int frame_size = std::max(frame_size_for_locals, frame_size_used);
   Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
   Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
   Handle<ByteArray> source_position_table =
       source_position_table_builder()->ToSourcePositionTable();
   Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
-      bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
+      bytecode_size, &bytecodes->front(), frame_size, parameter_count(),
       constant_pool);
   bytecode_array->set_handler_table(*handler_table);
   bytecode_array->set_source_position_table(*source_position_table);
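
A worked example of the new frame-size rule, assuming a 64-bit target
(kPointerSize == 8) and four fixed registers:

  // frame_size_for_locals = 4 * 8 = 32  // every local keeps a slot for
  //                                     // the debugger, used or not
  // frame_size_used       = 48          // hypothetical writer maximum
  // frame_size            = std::max(32, 48) = 48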
@@ -171,50 +103,10 @@
   return bytecode_array;
 }
 
-template <size_t N>
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t (&operands)[N],
-                                  OperandScale operand_scale) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-
-  int operand_count = static_cast<int>(N);
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
-
-  last_bytecode_start_ = bytecodes()->size();
-  // Emit prefix bytecode for scale if required.
-  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
-    bytecodes()->push_back(Bytecodes::ToByte(
-        Bytecodes::OperandScaleToPrefixBytecode(operand_scale)));
-  }
-
-  // Emit bytecode.
-  bytecodes()->push_back(Bytecodes::ToByte(bytecode));
-
-  // Emit operands.
-  for (int i = 0; i < operand_count; i++) {
-    DCHECK(OperandIsValid(bytecode, operand_scale, i, operands[i]));
-    switch (Bytecodes::GetOperandSize(bytecode, i, operand_scale)) {
-      case OperandSize::kNone:
-        UNREACHABLE();
-        break;
-      case OperandSize::kByte:
-        bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
-        break;
-      case OperandSize::kShort: {
-        uint8_t operand_bytes[2];
-        WriteUnalignedUInt16(operand_bytes, operands[i]);
-        bytecodes()->insert(bytecodes()->end(), operand_bytes,
-                            operand_bytes + 2);
-        break;
-      }
-      case OperandSize::kQuad: {
-        uint8_t operand_bytes[4];
-        WriteUnalignedUInt32(operand_bytes, operands[i]);
-        bytecodes()->insert(bytecodes()->end(), operand_bytes,
-                            operand_bytes + 4);
-        break;
-      }
-    }
+void BytecodeArrayBuilder::AttachSourceInfo(BytecodeNode* node) {
+  if (latest_source_info_.is_valid()) {
+    node->source_info().Update(latest_source_info_);
+    latest_source_info_.set_invalid();
   }
 }
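
AttachSourceInfo() is the hand-off point for the now-deferred source
positions: the Set*Position() methods below park a {position,
is_statement} pair in latest_source_info_, and the next emitted node
picks it up here. Sketched flow:

  // SetStatementPosition(stmt);    latest_source_info_ = {position, true}
  // Output(Bytecode::kStackCheck); AttachSourceInfo() moves the pair onto
  //                                the BytecodeNode and invalidates it, so
  //                                the position travels with the node even
  //                                if a later stage rewrites the bytecode.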
 
@@ -222,44 +114,68 @@
   // Don't output dead code.
   if (exit_seen_in_block_) return;
 
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
-  last_bytecode_start_ = bytecodes()->size();
-  bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+  BytecodeNode node(bytecode);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
                                         OperandScale operand_scale,
                                         uint32_t operand0, uint32_t operand1,
                                         uint32_t operand2, uint32_t operand3) {
-  uint32_t operands[] = {operand0, operand1, operand2, operand3};
-  Output(bytecode, operands, operand_scale);
+  // Don't output dead code.
+  if (exit_seen_in_block_) return;
+  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
+  DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
+  DCHECK(OperandIsValid(bytecode, operand_scale, 3, operand3));
+  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
+                    operand_scale);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
                                         OperandScale operand_scale,
                                         uint32_t operand0, uint32_t operand1,
                                         uint32_t operand2) {
-  uint32_t operands[] = {operand0, operand1, operand2};
-  Output(bytecode, operands, operand_scale);
+  // Don't output dead code.
+  if (exit_seen_in_block_) return;
+  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
+  DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
+  BytecodeNode node(bytecode, operand0, operand1, operand2, operand_scale);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
                                         OperandScale operand_scale,
                                         uint32_t operand0, uint32_t operand1) {
-  uint32_t operands[] = {operand0, operand1};
-  Output(bytecode, operands, operand_scale);
+  // Don't output dead code.
+  if (exit_seen_in_block_) return;
+  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
+  BytecodeNode node(bytecode, operand0, operand1, operand_scale);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
 }
 
 void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
                                         OperandScale operand_scale,
                                         uint32_t operand0) {
-  uint32_t operands[] = {operand0};
-  Output(bytecode, operands, operand_scale);
+  // Don't output dead code.
+  if (exit_seen_in_block_) return;
+  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+  BytecodeNode node(bytecode, operand0, operand_scale);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
                                                             Register reg) {
-  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+  OperandScale operand_scale =
+      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
   OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
                RegisterOperand(reg));
   return *this;
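
Each OutputScaled() overload now validates its operands eagerly and wraps
them in a BytecodeNode for the pipeline, where the old templated Output()
serialized bytes directly. A hypothetical trace for a register needing a
wide operand:

  // reg.SizeOfOperand()                 -> OperandSize::kShort
  // Bytecodes::OperandSizesToScale(...) -> OperandScale::kDouble
  // BytecodeNode(Bytecode::kLdar, RegisterOperand(reg), operand_scale)
  //   -> AttachSourceInfo(&node) -> pipeline()->Write(&node)
  // Serialization, including the Wide prefix and the 16-bit operand,
  // now happens in the writer stage at the end of the pipeline.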
@@ -272,7 +188,7 @@
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
-  Output(Bytecode::kLogicalNot);
+  Output(Bytecode::kToBooleanLogicalNot);
   return *this;
 }
 
@@ -284,7 +200,8 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
                                                              Register reg) {
-  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+  OperandScale operand_scale =
+      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
   OutputScaled(BytecodeForCompareOperation(op), operand_scale,
                RegisterOperand(reg));
   return *this;
@@ -297,8 +214,8 @@
   if (raw_smi == 0) {
     Output(Bytecode::kLdaZero);
   } else {
-    OperandSize operand_size = SizeForSignedOperand(raw_smi);
-    OperandScale operand_scale = OperandSizesToScale(operand_size);
+    OperandSize operand_size = Bytecodes::SizeForSignedOperand(raw_smi);
+    OperandScale operand_scale = Bytecodes::OperandSizesToScale(operand_size);
     OutputScaled(Bytecode::kLdaSmi, operand_scale,
                  SignedOperand(raw_smi, operand_size));
   }
@@ -309,7 +226,7 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
   size_t entry = GetConstantPoolEntry(object);
   OperandScale operand_scale =
-      OperandSizesToScale(SizeForUnsignedOperand(entry));
+      Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
   OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
   return *this;
 }
@@ -346,22 +263,18 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
-  if (!IsRegisterInAccumulator(reg)) {
-    OperandScale operand_scale =
-        OperandSizesToScale(SizeForRegisterOperand(reg));
-    OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
-  }
+  OperandScale operand_scale =
+      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
+  OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
-  if (!IsRegisterInAccumulator(reg)) {
-    OperandScale operand_scale =
-        OperandSizesToScale(SizeForRegisterOperand(reg));
-    OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
-  }
+  OperandScale operand_scale =
+      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
+  OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
   return *this;
 }
 
@@ -369,8 +282,8 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(from),
-                                                   SizeForRegisterOperand(to));
+  OperandScale operand_scale =
+      Bytecodes::OperandSizesToScale(from.SizeOfOperand(), to.SizeOfOperand());
   OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
                RegisterOperand(to));
   return *this;
@@ -382,9 +295,9 @@
   // operand rather than having extra bytecodes.
   Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale =
-      OperandSizesToScale(SizeForUnsignedOperand(name_index),
-                          SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(name_index),
+      Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
                UnsignedOperand(feedback_slot));
   return *this;
@@ -394,9 +307,9 @@
     const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale =
-      OperandSizesToScale(SizeForUnsignedOperand(name_index),
-                          SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(name_index),
+      Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
                UnsignedOperand(feedback_slot));
   return *this;
@@ -405,8 +318,8 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
                                                             int slot_index) {
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
   OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
                RegisterOperand(context), UnsignedOperand(slot_index));
   return *this;
@@ -415,8 +328,8 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
                                                              int slot_index) {
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
   OutputScaled(Bytecode::kStaContextSlot, operand_scale,
                RegisterOperand(context), UnsignedOperand(slot_index));
   return *this;
@@ -428,8 +341,8 @@
                           ? Bytecode::kLdaLookupSlotInsideTypeof
                           : Bytecode::kLdaLookupSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale =
-      OperandSizesToScale(SizeForUnsignedOperand(name_index));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(name_index));
   OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
   return *this;
 }
@@ -438,8 +351,8 @@
     const Handle<String> name, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale =
-      OperandSizesToScale(SizeForUnsignedOperand(name_index));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(name_index));
   OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
   return *this;
 }
@@ -447,9 +360,9 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
-      SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
+      Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
                UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
@@ -457,8 +370,8 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
     Register object, int feedback_slot) {
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(object), SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
                UnsignedOperand(feedback_slot));
   return *this;
@@ -469,9 +382,9 @@
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreIC(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
-      SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
+      Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(bytecode, operand_scale, RegisterOperand(object),
                UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
@@ -482,9 +395,9 @@
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(object), SizeForRegisterOperand(key),
-      SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      object.SizeOfOperand(), key.SizeOfOperand(),
+      Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(bytecode, operand_scale, RegisterOperand(object),
                RegisterOperand(key), UnsignedOperand(feedback_slot));
   return *this;
@@ -495,7 +408,7 @@
     Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
   size_t entry = GetConstantPoolEntry(shared_info);
   OperandScale operand_scale =
-      OperandSizesToScale(SizeForUnsignedOperand(entry));
+      Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
   OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
                UnsignedOperand(static_cast<size_t>(tenured)));
   return *this;
@@ -516,9 +429,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
     Handle<String> pattern, int literal_index, int flags) {
   size_t pattern_entry = GetConstantPoolEntry(pattern);
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForUnsignedOperand(pattern_entry),
-      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(pattern_entry),
+      Bytecodes::SizeForUnsignedOperand(literal_index),
+      Bytecodes::SizeForUnsignedOperand(flags));
   OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
                UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
                UnsignedOperand(flags));
@@ -529,9 +443,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
     Handle<FixedArray> constant_elements, int literal_index, int flags) {
   size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForUnsignedOperand(constant_elements_entry),
-      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(constant_elements_entry),
+      Bytecodes::SizeForUnsignedOperand(literal_index),
+      Bytecodes::SizeForUnsignedOperand(flags));
   OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
                UnsignedOperand(constant_elements_entry),
                UnsignedOperand(literal_index), UnsignedOperand(flags));
@@ -542,9 +457,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
     Handle<FixedArray> constant_properties, int literal_index, int flags) {
   size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForUnsignedOperand(constant_properties_entry),
-      SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(constant_properties_entry),
+      Bytecodes::SizeForUnsignedOperand(literal_index),
+      Bytecodes::SizeForUnsignedOperand(flags));
   OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
                UnsignedOperand(constant_properties_entry),
                UnsignedOperand(literal_index), UnsignedOperand(flags));
@@ -554,7 +470,7 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
   OperandScale operand_scale =
-      OperandSizesToScale(SizeForRegisterOperand(context));
+      Bytecodes::OperandSizesToScale(context.SizeOfOperand());
   OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
   return *this;
 }
@@ -562,39 +478,12 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
   OperandScale operand_scale =
-      OperandSizesToScale(SizeForRegisterOperand(context));
+      Bytecodes::OperandSizesToScale(context.SizeOfOperand());
   OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
   return *this;
 }
 
 
-bool BytecodeArrayBuilder::NeedToBooleanCast() {
-  if (!LastBytecodeInSameBlock()) {
-    return true;
-  }
-  PreviousBytecodeHelper previous_bytecode(*this);
-  switch (previous_bytecode.GetBytecode()) {
-    // If the previous bytecode puts a boolean in the accumulator return true.
-    case Bytecode::kLdaTrue:
-    case Bytecode::kLdaFalse:
-    case Bytecode::kLogicalNot:
-    case Bytecode::kTestEqual:
-    case Bytecode::kTestNotEqual:
-    case Bytecode::kTestEqualStrict:
-    case Bytecode::kTestLessThan:
-    case Bytecode::kTestLessThanOrEqual:
-    case Bytecode::kTestGreaterThan:
-    case Bytecode::kTestGreaterThanOrEqual:
-    case Bytecode::kTestInstanceOf:
-    case Bytecode::kTestIn:
-    case Bytecode::kForInDone:
-      return false;
-    default:
-      return true;
-  }
-}
-
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
   Output(Bytecode::kToObject);
   return *this;
@@ -602,41 +491,24 @@
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
-  if (LastBytecodeInSameBlock()) {
-    PreviousBytecodeHelper previous_bytecode(*this);
-    switch (previous_bytecode.GetBytecode()) {
-      case Bytecode::kToName:
-      case Bytecode::kTypeOf:
-        return *this;
-      case Bytecode::kLdaConstant: {
-        Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
-        if (object->IsName()) return *this;
-        break;
-      }
-      default:
-        break;
-    }
-  }
   Output(Bytecode::kToName);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber() {
-  // TODO(rmcilroy): consider omitting if the preceeding bytecode always returns
-  // a number.
   Output(Bytecode::kToNumber);
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+  size_t current_offset = pipeline()->FlushForOffset();
   if (label->is_forward_target()) {
    // An earlier jump instruction refers to this label. Update its location.
-    PatchJump(bytecodes()->end(), bytecodes()->begin() + label->offset());
+    PatchJump(current_offset, label->offset());
     // Now treat as if the label will only be back referred to.
   }
-  label->bind_to(bytecodes()->size());
+  label->bind_to(current_offset);
   LeaveBasicBlock();
   return *this;
 }
@@ -646,10 +518,11 @@
                                                  BytecodeLabel* label) {
   DCHECK(!label->is_bound());
   DCHECK(target.is_bound());
+  // There is no need to flush the pipeline here; it will have been
+  // flushed when |target| was bound.
   if (label->is_forward_target()) {
    // An earlier jump instruction refers to this label. Update its location.
-    PatchJump(bytecodes()->begin() + target.offset(),
-              bytecodes()->begin() + label->offset());
+    PatchJump(target.offset(), label->offset());
     // Now treat as if the label will only be back referred to.
   }
   label->bind_to(target.offset());
@@ -684,90 +557,74 @@
   }
 }
 
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
-  switch (jump_bytecode) {
-    case Bytecode::kJump:
-    case Bytecode::kJumpIfNull:
-    case Bytecode::kJumpIfUndefined:
-    case Bytecode::kJumpIfNotHole:
-      return jump_bytecode;
-    case Bytecode::kJumpIfTrue:
-      return Bytecode::kJumpIfToBooleanTrue;
-    case Bytecode::kJumpIfFalse:
-      return Bytecode::kJumpIfToBooleanFalse;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-
-void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
-    const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+void BytecodeArrayBuilder::PatchJumpWith8BitOperand(
+    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
-  ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
-  DCHECK_EQ(*operand_location, 0);
-  if (SizeForSignedOperand(delta) == OperandSize::kByte) {
+  size_t operand_location = jump_location + 1;
+  DCHECK_EQ(bytecodes->at(operand_location), 0);
+  if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
     // The jump fits within the range of an Imm operand, so cancel
     // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
-    *operand_location = static_cast<uint8_t>(delta);
+    bytecodes->at(operand_location) = static_cast<uint8_t>(delta);
   } else {
     // The jump does not fit within the range of an Imm operand, so
    // commit the reservation, putting the offset into the constant pool,
     // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
-    DCHECK(SizeForUnsignedOperand(entry) == OperandSize::kByte);
+    DCHECK(Bytecodes::SizeForUnsignedOperand(entry) == OperandSize::kByte);
     jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    *jump_location = Bytecodes::ToByte(jump_bytecode);
-    *operand_location = static_cast<uint8_t>(entry);
+    bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+    bytecodes->at(operand_location) = static_cast<uint8_t>(entry);
   }
 }
 
-void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
-    const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+void BytecodeArrayBuilder::PatchJumpWith16BitOperand(
+    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
-  ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+  size_t operand_location = jump_location + 1;
   uint8_t operand_bytes[2];
-  if (SizeForSignedOperand(delta) <= OperandSize::kShort) {
+  if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
     constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
     WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
   } else {
     jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    *jump_location = Bytecodes::ToByte(jump_bytecode);
+    bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
     size_t entry = constant_array_builder()->CommitReservedEntry(
         OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
     WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
   }
-  DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
-  *operand_location++ = operand_bytes[0];
-  *operand_location = operand_bytes[1];
+  DCHECK(bytecodes->at(operand_location) == 0 &&
+         bytecodes->at(operand_location + 1) == 0);
+  bytecodes->at(operand_location++) = operand_bytes[0];
+  bytecodes->at(operand_location) = operand_bytes[1];
 }
 
-void BytecodeArrayBuilder::PatchIndirectJumpWith32BitOperand(
-    const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
-  DCHECK(Bytecodes::IsJumpImmediate(Bytecodes::FromByte(*jump_location)));
+void BytecodeArrayBuilder::PatchJumpWith32BitOperand(
+    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
+  DCHECK(Bytecodes::IsJumpImmediate(
+      Bytecodes::FromByte(bytecodes->at(jump_location))));
   constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
-  ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
   uint8_t operand_bytes[4];
   WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
-  DCHECK(*operand_location == 0 && *(operand_location + 1) == 0 &&
-         *(operand_location + 2) == 0 && *(operand_location + 3) == 0);
-  *operand_location++ = operand_bytes[0];
-  *operand_location++ = operand_bytes[1];
-  *operand_location++ = operand_bytes[2];
-  *operand_location = operand_bytes[3];
+  size_t operand_location = jump_location + 1;
+  DCHECK(bytecodes->at(operand_location) == 0 &&
+         bytecodes->at(operand_location + 1) == 0 &&
+         bytecodes->at(operand_location + 2) == 0 &&
+         bytecodes->at(operand_location + 3) == 0);
+  bytecodes->at(operand_location++) = operand_bytes[0];
+  bytecodes->at(operand_location++) = operand_bytes[1];
+  bytecodes->at(operand_location++) = operand_bytes[2];
+  bytecodes->at(operand_location) = operand_bytes[3];
 }
 
-void BytecodeArrayBuilder::PatchJump(
-    const ZoneVector<uint8_t>::iterator& jump_target,
-    const ZoneVector<uint8_t>::iterator& jump_location) {
+void BytecodeArrayBuilder::PatchJump(size_t jump_target, size_t jump_location) {
+  ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
   int delta = static_cast<int>(jump_target - jump_location);
-  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
   int prefix_offset = 0;
   OperandScale operand_scale = OperandScale::kSingle;
   if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
@@ -776,19 +633,22 @@
     delta -= 1;
     prefix_offset = 1;
     operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
-    jump_bytecode = Bytecodes::FromByte(*(jump_location + prefix_offset));
+    jump_bytecode =
+        Bytecodes::FromByte(bytecodes->at(jump_location + prefix_offset));
   }
 
   DCHECK(Bytecodes::IsJump(jump_bytecode));
   switch (operand_scale) {
     case OperandScale::kSingle:
-      PatchIndirectJumpWith8BitOperand(jump_location, delta);
+      PatchJumpWith8BitOperand(bytecodes, jump_location, delta);
       break;
     case OperandScale::kDouble:
-      PatchIndirectJumpWith16BitOperand(jump_location + prefix_offset, delta);
+      PatchJumpWith16BitOperand(bytecodes, jump_location + prefix_offset,
+                                delta);
       break;
     case OperandScale::kQuadruple:
-      PatchIndirectJumpWith32BitOperand(jump_location + prefix_offset, delta);
+      PatchJumpWith32BitOperand(bytecodes, jump_location + prefix_offset,
+                                delta);
       break;
     default:
       UNREACHABLE();
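
A worked example of the patching arithmetic, with hypothetical offsets:
a forward JumpIfNull emitted at offset 10 with a reserved single-byte
operand, whose label is later bound at offset 40.

  // delta = 40 - 10 = 30; Bytecodes::SizeForSignedOperand(30) is kByte,
  // so PatchJumpWith8BitOperand() discards the reserved constant-pool
  // entry and writes 30 into the operand byte at offset 11.
  // Had the label been bound at offset 400 instead, delta == 390 does
  // not fit a byte: the reserved entry is committed as Smi::FromInt(390),
  // GetJumpWithConstantOperand() rewrites the bytecode to its *Constant
  // form, and the operand byte receives the constant-pool index.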
@@ -802,25 +662,20 @@
   // Don't emit dead code.
   if (exit_seen_in_block_) return *this;
 
-  // Check if the value in accumulator is boolean, if not choose an
-  // appropriate JumpIfToBoolean bytecode.
-  if (NeedToBooleanCast()) {
-    jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
-  }
-
   if (label->is_bound()) {
     // Label has been bound already so this is a backwards jump.
-    CHECK_GE(bytecodes()->size(), label->offset());
-    CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
-    size_t abs_delta = bytecodes()->size() - label->offset();
+    size_t current_offset = pipeline()->FlushForOffset();
+    CHECK_GE(current_offset, label->offset());
+    CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
+    size_t abs_delta = current_offset - label->offset();
     int delta = -static_cast<int>(abs_delta);
-    OperandSize operand_size = SizeForSignedOperand(delta);
+    OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
     if (operand_size > OperandSize::kByte) {
       // Adjust for scaling byte prefix for wide jump offset.
       DCHECK_LE(delta, 0);
       delta -= 1;
     }
-    OutputScaled(jump_bytecode, OperandSizesToScale(operand_size),
+    OutputScaled(jump_bytecode, Bytecodes::OperandSizesToScale(operand_size),
                  SignedOperand(delta, operand_size));
   } else {
     // The label has not yet been bound so this is a forward reference
@@ -829,43 +684,58 @@
     // when the label is bound. The reservation means the maximum size
     // of the operand for the constant is known and the jump can
     // be emitted into the bytecode stream with space for the operand.
-    label->set_referrer(bytecodes()->size());
     unbound_jumps_++;
     OperandSize reserved_operand_size =
         constant_array_builder()->CreateReservedEntry();
-    OutputScaled(jump_bytecode, OperandSizesToScale(reserved_operand_size), 0);
+    OutputScaled(jump_bytecode,
+                 Bytecodes::OperandSizesToScale(reserved_operand_size), 0);
+
+    // Calculate the label position by flushing for offset after emitting the
+    // jump bytecode.
+    size_t offset = pipeline()->FlushForOffset();
+    OperandScale operand_scale =
+        Bytecodes::OperandSizesToScale(reserved_operand_size);
+    offset -= Bytecodes::Size(jump_bytecode, operand_scale);
+    if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
+      offset -= 1;
+    }
+    label->set_referrer(offset);
   }
   LeaveBasicBlock();
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
   return OutputJump(Bytecode::kJump, label);
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
-  return OutputJump(Bytecode::kJumpIfTrue, label);
+  // The peephole optimizer attempts to simplify JumpIfToBooleanTrue
+  // to JumpIfTrue.
+  return OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
-  return OutputJump(Bytecode::kJumpIfFalse, label);
+  // The peephole optimizer attempts to simplify JumpIfToBooleanFalse
+  // to JumpIfFalse.
+  return OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
   return OutputJump(Bytecode::kJumpIfNull, label);
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
     BytecodeLabel* label) {
   return OutputJump(Bytecode::kJumpIfUndefined, label);
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck() {
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
+  if (position != RelocInfo::kNoPosition) {
+    // We need to attach a non-breakable source position to a stack check,
+    // so we simply add it as an expression position.
+    latest_source_info_.Update({position, false});
+  }
   Output(Bytecode::kStackCheck);
   return *this;
 }
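
JumpIfTrue()/JumpIfFalse() now always emit the ToBoolean variants and
leave the strength reduction to the peephole stage, replacing the old
NeedToBooleanCast() check in the builder. A sketch of the intended
rewrite, assuming the rule set in bytecode-peephole-optimizer.cc:

  // TestEqual r1              =>  TestEqual r1
  // JumpIfToBooleanTrue @l    =>  JumpIfTrue @l   (accumulator is boolean)
  //
  // LdaGlobal ...             =>  LdaGlobal ...
  // JumpIfToBooleanTrue @l    =>  unchanged       (value still needs coercion)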
@@ -904,7 +774,7 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
     Register cache_info_triple) {
   OperandScale operand_scale =
-      OperandSizesToScale(SizeForRegisterOperand(cache_info_triple));
+      Bytecodes::OperandSizesToScale(cache_info_triple.SizeOfOperand());
   OutputScaled(Bytecode::kForInPrepare, operand_scale,
                RegisterOperand(cache_info_triple));
   return *this;
@@ -912,8 +782,8 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
                                                       Register cache_length) {
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(index), SizeForRegisterOperand(cache_length));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      index.SizeOfOperand(), cache_length.SizeOfOperand());
   OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
                RegisterOperand(cache_length));
   return *this;
@@ -922,10 +792,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
     Register receiver, Register index, Register cache_type_array_pair,
     int feedback_slot) {
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(receiver), SizeForRegisterOperand(index),
-      SizeForRegisterOperand(cache_type_array_pair),
-      SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      receiver.SizeOfOperand(), index.SizeOfOperand(),
+      cache_type_array_pair.SizeOfOperand(),
+      Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
                RegisterOperand(index), RegisterOperand(cache_type_array_pair),
                UnsignedOperand(feedback_slot));
@@ -935,15 +805,36 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
   OperandScale operand_scale =
-      OperandSizesToScale(SizeForRegisterOperand(index));
+      Bytecodes::OperandSizesToScale(index.SizeOfOperand());
   OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
   return *this;
 }
 
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
+    Register generator) {
+  OperandScale operand_scale =
+      Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
+  OutputScaled(Bytecode::kSuspendGenerator, operand_scale,
+               RegisterOperand(generator));
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
+    Register generator) {
+  OperandScale operand_scale =
+      Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
+  OutputScaled(Bytecode::kResumeGenerator, operand_scale,
+               RegisterOperand(generator));
+  return *this;
+}
+
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
                                                         bool will_catch) {
-  handler_table_builder()->SetHandlerTarget(handler_id, bytecodes()->size());
+  size_t offset = pipeline()->FlushForOffset();
+  handler_table_builder()->SetHandlerTarget(handler_id, offset);
   handler_table_builder()->SetPrediction(handler_id, will_catch);
   return *this;
 }
@@ -951,21 +842,23 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
                                                          Register context) {
-  handler_table_builder()->SetTryRegionStart(handler_id, bytecodes()->size());
+  size_t offset = pipeline()->FlushForOffset();
+  handler_table_builder()->SetTryRegionStart(handler_id, offset);
   handler_table_builder()->SetContextRegister(handler_id, context);
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
-  handler_table_builder()->SetTryRegionEnd(handler_id, bytecodes()->size());
+  size_t offset = pipeline()->FlushForOffset();
+  handler_table_builder()->SetTryRegionEnd(handler_id, offset);
   return *this;
 }
 
 
 void BytecodeArrayBuilder::LeaveBasicBlock() {
-  last_block_end_ = bytecodes()->size();
   exit_seen_in_block_ = false;
+  pipeline()->FlushBasicBlock();
 }
 
 void BytecodeArrayBuilder::EnsureReturn() {
@@ -982,10 +875,10 @@
                                                  int feedback_slot,
                                                  TailCallMode tail_call_mode) {
   Bytecode bytecode = BytecodeForCall(tail_call_mode);
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(callable), SizeForRegisterOperand(receiver_args),
-      SizeForUnsignedOperand(receiver_args_count),
-      SizeForUnsignedOperand(feedback_slot));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      callable.SizeOfOperand(), receiver_args.SizeOfOperand(),
+      Bytecodes::SizeForUnsignedOperand(receiver_args_count),
+      Bytecodes::SizeForUnsignedOperand(feedback_slot));
   OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
                RegisterOperand(receiver_args),
                UnsignedOperand(receiver_args_count),
@@ -1000,9 +893,9 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(constructor), SizeForRegisterOperand(first_arg),
-      SizeForUnsignedOperand(arg_count));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      constructor.SizeOfOperand(), first_arg.SizeOfOperand(),
+      Bytecodes::SizeForUnsignedOperand(arg_count));
   OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
                RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
@@ -1012,7 +905,7 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
-  DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+  DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
@@ -1020,8 +913,8 @@
   Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
                           ? Bytecode::kInvokeIntrinsic
                           : Bytecode::kCallRuntime;
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count));
   OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
                RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
@@ -1032,14 +925,14 @@
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
     Register first_return) {
   DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
-  DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+  DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  OperandScale operand_scale = OperandSizesToScale(
-      SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count),
-      SizeForRegisterOperand(first_return));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count),
+      first_return.SizeOfOperand());
   OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
                static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
                UnsignedOperand(arg_count), RegisterOperand(first_return));
@@ -1048,10 +941,10 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
     int context_index, Register receiver_args, size_t receiver_args_count) {
-  OperandScale operand_scale =
-      OperandSizesToScale(SizeForUnsignedOperand(context_index),
-                          SizeForRegisterOperand(receiver_args),
-                          SizeForUnsignedOperand(receiver_args_count));
+  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+      Bytecodes::SizeForUnsignedOperand(context_index),
+      receiver_args.SizeOfOperand(),
+      Bytecodes::SizeForUnsignedOperand(receiver_args_count));
   OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
                UnsignedOperand(context_index), RegisterOperand(receiver_args),
                UnsignedOperand(receiver_args_count));
@@ -1062,7 +955,7 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
   OperandScale operand_scale =
-      OperandSizesToScale(SizeForRegisterOperand(object));
+      Bytecodes::OperandSizesToScale(object.SizeOfOperand());
   OutputScaled(BytecodeForDelete(language_mode), operand_scale,
                RegisterOperand(object));
   return *this;
@@ -1075,29 +968,25 @@
 void BytecodeArrayBuilder::SetReturnPosition() {
   if (return_position_ == RelocInfo::kNoPosition) return;
   if (exit_seen_in_block_) return;
-  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
-                                                      return_position_);
+  latest_source_info_.Update({return_position_, true});
 }
 
 void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
   if (stmt->position() == RelocInfo::kNoPosition) return;
   if (exit_seen_in_block_) return;
-  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
-                                                      stmt->position());
+  latest_source_info_.Update({stmt->position(), true});
 }
 
 void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
   if (exit_seen_in_block_) return;
-  source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
-                                                       expr->position());
+  latest_source_info_.Update({expr->position(), false});
 }
 
 void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
   if (exit_seen_in_block_) return;
-  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
-                                                      expr->position());
+  latest_source_info_.Update({expr->position(), true});
 }
 
 bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
@@ -1129,10 +1018,10 @@
     case OperandType::kRuntimeId:
     case OperandType::kImm: {
       size_t unsigned_value = static_cast<size_t>(operand_value);
-      return SizeForUnsignedOperand(unsigned_value) <= operand_size;
+      return Bytecodes::SizeForUnsignedOperand(unsigned_value) <= operand_size;
     }
     case OperandType::kMaybeReg:
-      if (operand_value == 0) {
+      if (RegisterFromOperand(operand_value) == Register(0)) {
         return true;
       }
     // Fall-through to kReg case.
@@ -1169,7 +1058,7 @@
     return false;
   }
 
-  if (SizeForRegisterOperand(reg) > reg_size) {
+  if (reg.SizeOfOperand() > reg_size) {
     return false;
   }
 
@@ -1186,25 +1075,6 @@
   }
 }
 
-
-bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
-  return last_bytecode_start_ < bytecodes()->size() &&
-         last_bytecode_start_ >= last_block_end_;
-}
-
-
-bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
-  if (LastBytecodeInSameBlock()) {
-    PreviousBytecodeHelper previous_bytecode(*this);
-    Bytecode bytecode = previous_bytecode.GetBytecode();
-    if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
-      return previous_bytecode.GetRegisterOperand(0) == reg;
-    }
-  }
-  return false;
-}
-
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
   switch (op) {
@@ -1386,76 +1256,6 @@
   return Bytecode::kIllegal;
 }
 
-// static
-OperandSize BytecodeArrayBuilder::SizeForRegisterOperand(Register value) {
-  if (value.is_byte_operand()) {
-    return OperandSize::kByte;
-  } else if (value.is_short_operand()) {
-    return OperandSize::kShort;
-  } else {
-    return OperandSize::kQuad;
-  }
-}
-
-// static
-OperandSize BytecodeArrayBuilder::SizeForSignedOperand(int value) {
-  if (kMinInt8 <= value && value <= kMaxInt8) {
-    return OperandSize::kByte;
-  } else if (kMinInt16 <= value && value <= kMaxInt16) {
-    return OperandSize::kShort;
-  } else {
-    return OperandSize::kQuad;
-  }
-}
-
-// static
-OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(int value) {
-  DCHECK_GE(value, 0);
-  if (value <= kMaxUInt8) {
-    return OperandSize::kByte;
-  } else if (value <= kMaxUInt16) {
-    return OperandSize::kShort;
-  } else {
-    return OperandSize::kQuad;
-  }
-}
-
-OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(size_t value) {
-  if (value <= static_cast<size_t>(kMaxUInt8)) {
-    return OperandSize::kByte;
-  } else if (value <= static_cast<size_t>(kMaxUInt16)) {
-    return OperandSize::kShort;
-  } else if (value <= kMaxUInt32) {
-    return OperandSize::kQuad;
-  } else {
-    UNREACHABLE();
-    return OperandSize::kQuad;
-  }
-}
-
-OperandScale BytecodeArrayBuilder::OperandSizesToScale(OperandSize size0,
-                                                       OperandSize size1,
-                                                       OperandSize size2,
-                                                       OperandSize size3) {
-  OperandSize upper = std::max(size0, size1);
-  OperandSize lower = std::max(size2, size3);
-  OperandSize result = std::max(upper, lower);
-  // Operand sizes have been scaled before calling this function.
-  // Currently all scalable operands are byte sized at
-  // OperandScale::kSingle.
-  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
-                    static_cast<int>(OperandScale::kSingle) &&
-                static_cast<int>(OperandSize::kShort) ==
-                    static_cast<int>(OperandScale::kDouble) &&
-                static_cast<int>(OperandSize::kQuad) ==
-                    static_cast<int>(OperandScale::kQuadruple));
-  OperandScale operand_scale = static_cast<OperandScale>(result);
-  DCHECK(operand_scale == OperandScale::kSingle ||
-         operand_scale == OperandScale::kDouble ||
-         operand_scale == OperandScale::kQuadruple);
-  return operand_scale;
-}
-
 uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
   return static_cast<uint32_t>(reg.ToOperand());
 }
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 4446a63..3930a06 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -6,6 +6,7 @@
 #define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
 
 #include "src/ast/ast.h"
+#include "src/interpreter/bytecode-array-writer.h"
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
@@ -21,6 +22,8 @@
 namespace interpreter {
 
 class BytecodeLabel;
+class BytecodeNode;
+class BytecodePipelineStage;
 class Register;
 
 class BytecodeArrayBuilder final : public ZoneObject {
@@ -28,7 +31,6 @@
   BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
                        int context_count, int locals_count,
                        FunctionLiteral* literal = nullptr);
-  ~BytecodeArrayBuilder();
 
   Handle<BytecodeArray> ToBytecodeArray();
 
@@ -224,7 +226,7 @@
   BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
 
-  BytecodeArrayBuilder& StackCheck();
+  BytecodeArrayBuilder& StackCheck(int position);
 
   BytecodeArrayBuilder& Throw();
   BytecodeArrayBuilder& ReThrow();
@@ -241,6 +243,10 @@
                                   int feedback_slot);
   BytecodeArrayBuilder& ForInStep(Register index);
 
+  // Generators.
+  BytecodeArrayBuilder& SuspendGenerator(Register generator);
+  BytecodeArrayBuilder& ResumeGenerator(Register generator);
+
   // Exception handling.
   BytecodeArrayBuilder& MarkHandler(int handler_id, bool will_catch);
   BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
@@ -257,26 +263,16 @@
   void SetExpressionAsStatementPosition(Expression* expr);
 
   // Accessors
-  Zone* zone() const { return zone_; }
   TemporaryRegisterAllocator* temporary_register_allocator() {
     return &temporary_allocator_;
   }
   const TemporaryRegisterAllocator* temporary_register_allocator() const {
     return &temporary_allocator_;
   }
+  Zone* zone() const { return zone_; }
 
   void EnsureReturn();
 
-  static OperandScale OperandSizesToScale(
-      OperandSize size0, OperandSize size1 = OperandSize::kByte,
-      OperandSize size2 = OperandSize::kByte,
-      OperandSize size3 = OperandSize::kByte);
-
-  static OperandSize SizeForRegisterOperand(Register reg);
-  static OperandSize SizeForSignedOperand(int value);
-  static OperandSize SizeForUnsignedOperand(int value);
-  static OperandSize SizeForUnsignedOperand(size_t value);
-
   static uint32_t RegisterOperand(Register reg);
   static Register RegisterFromOperand(uint32_t operand);
   static uint32_t SignedOperand(int value, OperandSize size);
@@ -284,7 +280,6 @@
   static uint32_t UnsignedOperand(size_t value);
 
  private:
-  class PreviousBytecodeHelper;
   friend class BytecodeRegisterAllocator;
 
   static Bytecode BytecodeForBinaryOperation(Token::Value op);
@@ -300,11 +295,7 @@
   static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
 
   static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
-  static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
 
-  template <size_t N>
-  INLINE(void Output(Bytecode bytecode, uint32_t (&operands)[N],
-                     OperandScale operand_scale = OperandScale::kSingle));
   void Output(Bytecode bytecode);
   void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
                     uint32_t operand0, uint32_t operand1, uint32_t operand2,
@@ -318,14 +309,13 @@
 
   BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
                                    BytecodeLabel* label);
-  void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
-                 const ZoneVector<uint8_t>::iterator& jump_location);
-  void PatchIndirectJumpWith8BitOperand(
-      const ZoneVector<uint8_t>::iterator& jump_location, int delta);
-  void PatchIndirectJumpWith16BitOperand(
-      const ZoneVector<uint8_t>::iterator& jump_location, int delta);
-  void PatchIndirectJumpWith32BitOperand(
-      const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+  void PatchJump(size_t jump_target, size_t jump_location);
+  void PatchJumpWith8BitOperand(ZoneVector<uint8_t>* bytecodes,
+                                size_t jump_location, int delta);
+  void PatchJumpWith16BitOperand(ZoneVector<uint8_t>* bytecodes,
+                                 size_t jump_location, int delta);
+  void PatchJumpWith32BitOperand(ZoneVector<uint8_t>* bytecodes,
+                                 size_t jump_location, int delta);
 
   void LeaveBasicBlock();
 
@@ -333,9 +323,8 @@
                       int operand_index, uint32_t operand_value) const;
   bool RegisterIsValid(Register reg, OperandSize reg_size) const;
 
-  bool LastBytecodeInSameBlock() const;
-  bool NeedToBooleanCast();
-  bool IsRegisterInAccumulator(Register reg);
+  // Attach the latest source position to |node|.
+  void AttachSourceInfo(BytecodeNode* node);
 
   // Set position for return.
   void SetReturnPosition();
@@ -343,9 +332,16 @@
   // Gets a constant pool entry for the |object|.
   size_t GetConstantPoolEntry(Handle<Object> object);
 
-  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
-  const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
+  // Not implemented, as the illegal bytecode is used internally to
+  // indicate that a bytecode field is not valid or that an error has
+  // occurred during bytecode generation.
+  BytecodeArrayBuilder& Illegal();
+
   Isolate* isolate() const { return isolate_; }
+  BytecodeArrayWriter* bytecode_array_writer() {
+    return &bytecode_array_writer_;
+  }
+  BytecodePipelineStage* pipeline() { return pipeline_; }
   ConstantArrayBuilder* constant_array_builder() {
     return &constant_array_builder_;
   }
@@ -361,13 +357,10 @@
 
   Isolate* isolate_;
   Zone* zone_;
-  ZoneVector<uint8_t> bytecodes_;
   bool bytecode_generated_;
   ConstantArrayBuilder constant_array_builder_;
   HandlerTableBuilder handler_table_builder_;
   SourcePositionTableBuilder source_position_table_builder_;
-  size_t last_block_end_;
-  size_t last_bytecode_start_;
   bool exit_seen_in_block_;
   int unbound_jumps_;
   int parameter_count_;
@@ -375,6 +368,9 @@
   int context_register_count_;
   int return_position_;
   TemporaryRegisterAllocator temporary_allocator_;
+  BytecodeArrayWriter bytecode_array_writer_;
+  BytecodePipelineStage* pipeline_;
+  BytecodeSourceInfo latest_source_info_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index a17efcb..319d2a0 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -128,26 +128,15 @@
 }
 
 int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
-  interpreter::OperandType operand_type =
-      Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
-  switch (operand_type) {
-    case OperandType::kRegPair:
-    case OperandType::kRegOutPair:
-      return 2;
-    case OperandType::kRegOutTriple:
-      return 3;
-    default: {
-      if (operand_index + 1 !=
-          Bytecodes::NumberOfOperands(current_bytecode())) {
-        OperandType next_operand_type =
-            Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
-        if (OperandType::kRegCount == next_operand_type) {
-          return GetRegisterCountOperand(operand_index + 1);
-        }
-      }
-      return 1;
-    }
+  DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+  const OperandType* operand_types =
+      Bytecodes::GetOperandTypes(current_bytecode());
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_types[operand_index]));
+  if (operand_types[operand_index + 1] == OperandType::kRegCount) {
+    return GetRegisterCountOperand(operand_index + 1);
+  } else {
+    OperandType operand_type = operand_types[operand_index];
+    return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
   }
 }
 
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
new file mode 100644
index 0000000..029688e
--- /dev/null
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -0,0 +1,105 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-writer.h"
+
+#include <iomanip>
+#include "src/interpreter/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayWriter::BytecodeArrayWriter(
+    Zone* zone, SourcePositionTableBuilder* source_position_table_builder)
+    : bytecodes_(zone),
+      max_register_count_(0),
+      source_position_table_builder_(source_position_table_builder) {}
+
+// override
+BytecodeArrayWriter::~BytecodeArrayWriter() {}
+
+// override
+size_t BytecodeArrayWriter::FlushForOffset() { return bytecodes()->size(); }
+
+// override
+void BytecodeArrayWriter::Write(BytecodeNode* node) {
+  UpdateSourcePositionTable(node);
+  EmitBytecode(node);
+}
+
+void BytecodeArrayWriter::UpdateSourcePositionTable(
+    const BytecodeNode* const node) {
+  int bytecode_offset = static_cast<int>(bytecodes()->size());
+  const BytecodeSourceInfo& source_info = node->source_info();
+  if (source_info.is_valid()) {
+    source_position_table_builder_->AddPosition(bytecode_offset,
+                                                source_info.source_position(),
+                                                source_info.is_statement());
+  }
+}
+
+void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
+  DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
+
+  OperandScale operand_scale = node->operand_scale();
+  if (operand_scale != OperandScale::kSingle) {
+    Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
+    bytecodes()->push_back(Bytecodes::ToByte(prefix));
+  }
+
+  Bytecode bytecode = node->bytecode();
+  bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+
+  int register_operand_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
+  const uint32_t* const operands = node->operands();
+  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+  for (int i = 0; operand_types[i] != OperandType::kNone; ++i) {
+    OperandType operand_type = operand_types[i];
+    switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+      case OperandSize::kNone:
+        UNREACHABLE();
+        break;
+      case OperandSize::kByte:
+        bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
+        break;
+      case OperandSize::kShort: {
+        uint8_t operand_bytes[2];
+        WriteUnalignedUInt16(operand_bytes, operands[i]);
+        bytecodes()->insert(bytecodes()->end(), operand_bytes,
+                            operand_bytes + 2);
+        break;
+      }
+      case OperandSize::kQuad: {
+        uint8_t operand_bytes[4];
+        WriteUnalignedUInt32(operand_bytes, operands[i]);
+        bytecodes()->insert(bytecodes()->end(), operand_bytes,
+                            operand_bytes + 4);
+        break;
+      }
+    }
+
+    if ((register_operand_bitmap >> i) & 1) {
+      int count;
+      if (operand_types[i + 1] == OperandType::kRegCount) {
+        count = static_cast<int>(operands[i + 1]);
+      } else {
+        count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+      }
+      Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
+      max_register_count_ = std::max(max_register_count_, reg.index() + count);
+    }
+  }
+}
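+
+// Encoding sketch, for illustration only (not part of the V8 sources): a
+// bytecode whose single scalable operand is 16 bits wide at
+// OperandScale::kDouble is emitted as the kWide prefix byte, the bytecode
+// byte, and then the two operand bytes written in native byte order by
+// WriteUnalignedUInt16 above.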
+
+// override
+void BytecodeArrayWriter::FlushBasicBlock() {}
+
+int BytecodeArrayWriter::GetMaximumFrameSizeUsed() {
+  return max_register_count_ * kPointerSize;
+}
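+
+// For illustration (not part of the V8 sources): if the highest register
+// seen was r2 with a count of 1, max_register_count_ is 3 and the frame
+// size is 3 * kPointerSize bytes (24 bytes on a 64-bit target).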
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-array-writer.h b/src/interpreter/bytecode-array-writer.h
new file mode 100644
index 0000000..b1303c9
--- /dev/null
+++ b/src/interpreter/bytecode-array-writer.h
@@ -0,0 +1,50 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class SourcePositionTableBuilder;
+
+// Class for emitting bytecode as the final stage of the bytecode
+// generation pipeline.
+class BytecodeArrayWriter final : public BytecodePipelineStage {
+ public:
+  BytecodeArrayWriter(
+      Zone* zone, SourcePositionTableBuilder* source_position_table_builder);
+  virtual ~BytecodeArrayWriter();
+
+  void Write(BytecodeNode* node) override;
+  size_t FlushForOffset() override;
+  void FlushBasicBlock() override;
+
+  // Get the bytecode vector.
+  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+
+  // Returns the size in bytes of the frame associated with the
+  // bytecode written.
+  int GetMaximumFrameSizeUsed();
+
+ private:
+  void EmitBytecode(const BytecodeNode* const node);
+  void UpdateSourcePositionTable(const BytecodeNode* const node);
+
+  ZoneVector<uint8_t> bytecodes_;
+  int max_register_count_;
+  SourcePositionTableBuilder* source_position_table_builder_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index b0fa245..650234a 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -5,6 +5,7 @@
 #include "src/interpreter/bytecode-generator.h"
 
 #include "src/ast/scopes.h"
+#include "src/code-stubs.h"
 #include "src/compiler.h"
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/control-flow-builders.h"
@@ -554,38 +555,41 @@
   Register result_register_;
 };
 
-BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
-    : isolate_(isolate),
-      zone_(zone),
-      builder_(nullptr),
-      info_(nullptr),
-      scope_(nullptr),
-      globals_(0, zone),
+BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
+    : isolate_(info->isolate()),
+      zone_(info->zone()),
+      builder_(new (zone()) BytecodeArrayBuilder(
+          info->isolate(), info->zone(), info->num_parameters_including_this(),
+          info->scope()->MaxNestedContextChainLength(),
+          info->scope()->num_stack_slots(), info->literal())),
+      info_(info),
+      scope_(info->scope()),
+      globals_(0, info->zone()),
       execution_control_(nullptr),
       execution_context_(nullptr),
       execution_result_(nullptr),
       register_allocator_(nullptr),
+      generator_resume_points_(info->literal()->yield_count(), info->zone()),
+      generator_state_(),
       try_catch_nesting_level_(0),
       try_finally_nesting_level_(0) {
-  InitializeAstVisitor(isolate);
+  InitializeAstVisitor(isolate());
 }
 
-Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
-  set_info(info);
-  set_scope(info->scope());
-
-  // Initialize bytecode array builder.
-  set_builder(new (zone()) BytecodeArrayBuilder(
-      isolate(), zone(), info->num_parameters_including_this(),
-      scope()->MaxNestedContextChainLength(), scope()->num_stack_slots(),
-      info->literal()));
-
+Handle<BytecodeArray> BytecodeGenerator::MakeBytecode() {
   // Initialize the incoming context.
   ContextScope incoming_context(this, scope(), false);
 
   // Initialize control scope.
   ControlScopeForTopLevel control(this);
 
+  RegisterAllocationScope register_scope(this);
+
+  if (IsGeneratorFunction(info()->literal()->kind())) {
+    generator_state_ = register_allocator()->NewRegister();
+    VisitGeneratorPrologue();
+  }
+
   // Build function context only if there are context allocated variables.
   if (scope()->NeedsContext()) {
     // Push a new inner context scope for the function.
@@ -597,9 +601,15 @@
     MakeBytecodeBody();
   }
 
+  // In generator functions, we may not have visited every yield in the AST
+  // since we skip some obviously dead code. Hence the generated bytecode may
+  // contain jumps to unbound labels (resume points that will never be used).
+  // We bind these now.
+  for (auto& label : generator_resume_points_) {
+    if (!label.is_bound()) builder()->Bind(&label);
+  }
+
   builder()->EnsureReturn();
-  set_scope(nullptr);
-  set_info(nullptr);
   return builder()->ToBytecodeArray();
 }
 
@@ -628,12 +638,90 @@
   VisitDeclarations(scope()->declarations());
 
   // Perform a stack-check before the body.
-  builder()->StackCheck();
+  builder()->StackCheck(info()->literal()->start_position());
 
   // Visit statements in the function body.
   VisitStatements(info()->literal()->body());
 }
 
+void BytecodeGenerator::BuildIndexedJump(Register index, size_t start_index,
+                                         size_t size,
+                                         ZoneVector<BytecodeLabel>& targets) {
+  // TODO(neis): Optimize this by using a proper jump table.
+  for (size_t i = start_index; i < start_index + size; i++) {
+    DCHECK_LT(i, targets.size());
+    builder()
+        ->LoadLiteral(Smi::FromInt(static_cast<int>(i)))
+        .CompareOperation(Token::Value::EQ_STRICT, index)
+        .JumpIfTrue(&(targets[i]));
+  }
+
+  RegisterAllocationScope register_scope(this);
+  Register reason = register_allocator()->NewRegister();
+  BailoutReason bailout_reason = BailoutReason::kInvalidJumpTableIndex;
+  builder()
+      ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
+      .StoreAccumulatorInRegister(reason)
+      .CallRuntime(Runtime::kAbort, reason, 1);
+}
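+
+// Dispatch sketch, for illustration only (not part of the V8 sources): with
+// start_index == 0 and size == 2, the loop above emits one compare-and-branch
+// pair per resume point, roughly
+//   LdaSmi [0]; TestEqualStrict index; JumpIfTrue @targets[0]
+//   LdaSmi [1]; TestEqualStrict index; JumpIfTrue @targets[1]
+// and falls through to the Runtime::kAbort call for any other state value.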
+
+void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
+                                             LoopBuilder* loop_builder) {
+  // Recall that stmt->yield_count() is always zero inside ordinary
+  // (i.e. non-generator) functions.
+
+  // Collect all labels for generator resume points within the loop (if any) so
+  // that they can be bound to the loop header below. Also create fresh labels
+  // for these resume points, to be used inside the loop.
+  ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
+  size_t first_yield = stmt->first_yield_id();
+  for (size_t id = first_yield; id < first_yield + stmt->yield_count(); id++) {
+    DCHECK_LT(id, generator_resume_points_.size());
+    auto& label = generator_resume_points_[id];
+    resume_points_in_loop.push_back(label);
+    generator_resume_points_[id] = BytecodeLabel();
+  }
+
+  loop_builder->LoopHeader(&resume_points_in_loop);
+
+  if (stmt->yield_count() > 0) {
+    // If we are not resuming, fall through to loop body.
+    // If we are resuming, perform state dispatch.
+    BytecodeLabel not_resuming;
+    builder()
+        ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+        .CompareOperation(Token::Value::EQ, generator_state_)
+        .JumpIfTrue(&not_resuming);
+    BuildIndexedJump(generator_state_, first_yield, stmt->yield_count(),
+                     generator_resume_points_);
+    builder()->Bind(&not_resuming);
+  }
+}
+
+void BytecodeGenerator::VisitGeneratorPrologue() {
+  // The generator resume trampoline abuses the new.target register both to
+  // indicate that this is a resume call and to pass in the generator object.
+  // In ordinary calls, new.target is always undefined because generator
+  // functions are non-constructable.
+  Register generator_object = Register::new_target();
+  BytecodeLabel regular_call;
+  builder()
+      ->LoadAccumulatorWithRegister(generator_object)
+      .JumpIfUndefined(&regular_call);
+
+  // This is a resume call. Restore registers and perform state dispatch.
+  // (The current context has already been restored by the trampoline.)
+  builder()
+      ->ResumeGenerator(generator_object)
+      .StoreAccumulatorInRegister(generator_state_);
+  BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
+                   generator_resume_points_);
+
+  builder()->Bind(&regular_call);
+  // This is a regular call. Fall through to the ordinary function prologue,
+  // after which we will run into the generator object creation and the initial
+  // yield (both inserted by the parser).
+}
 
 void BytecodeGenerator::VisitBlock(Block* stmt) {
   // Visit declarations and statements.
@@ -663,17 +751,14 @@
   VariableMode mode = decl->mode();
   // Const and let variables are initialized with the hole so that we can
   // check that they are only assigned once.
-  bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+  bool hole_init = mode == CONST || mode == LET;
   switch (variable->location()) {
     case VariableLocation::GLOBAL:
-    case VariableLocation::UNALLOCATED: {
-      Handle<Oddball> value = variable->binding_needs_init()
-                                  ? isolate()->factory()->the_hole_value()
-                                  : isolate()->factory()->undefined_value();
+    case VariableLocation::UNALLOCATED:
+      DCHECK(!variable->binding_needs_init());
       globals()->push_back(variable->name());
-      globals()->push_back(value);
+      globals()->push_back(isolate()->factory()->undefined_value());
       break;
-    }
     case VariableLocation::LOCAL:
       if (hole_init) {
         Register destination(variable->index());
@@ -793,9 +878,7 @@
   Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
       static_cast<int>(globals()->size()), TENURED);
   for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
-  int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
-                      DeclareGlobalsNativeFlag::encode(info()->is_native()) |
-                      DeclareGlobalsLanguageMode::encode(language_mode());
+  int encoded_flags = info()->GetDeclareGlobalsFlags();
 
   Register pairs = register_allocator()->NewRegister();
   builder()->LoadLiteral(data);
@@ -955,23 +1038,21 @@
 void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
                                            LoopBuilder* loop_builder) {
   ControlScopeForIteration execution_control(this, stmt, loop_builder);
-  builder()->StackCheck();
+  builder()->StackCheck(stmt->position());
   Visit(stmt->body());
+  loop_builder->SetContinueTarget();
 }
 
 void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   LoopBuilder loop_builder(builder());
-  loop_builder.LoopHeader();
+  VisitIterationHeader(stmt, &loop_builder);
   if (stmt->cond()->ToBooleanIsFalse()) {
     VisitIterationBody(stmt, &loop_builder);
-    loop_builder.Condition();
   } else if (stmt->cond()->ToBooleanIsTrue()) {
-    loop_builder.Condition();
     VisitIterationBody(stmt, &loop_builder);
     loop_builder.JumpToHeader();
   } else {
     VisitIterationBody(stmt, &loop_builder);
-    loop_builder.Condition();
     builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.JumpToHeaderIfTrue();
@@ -986,8 +1067,7 @@
   }
 
   LoopBuilder loop_builder(builder());
-  loop_builder.LoopHeader();
-  loop_builder.Condition();
+  VisitIterationHeader(stmt, &loop_builder);
   if (!stmt->cond()->ToBooleanIsTrue()) {
     builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
@@ -1010,8 +1090,7 @@
   }
 
   LoopBuilder loop_builder(builder());
-  loop_builder.LoopHeader();
-  loop_builder.Condition();
+  VisitIterationHeader(stmt, &loop_builder);
   if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
     builder()->SetExpressionAsStatementPosition(stmt->cond());
     VisitForAccumulatorValue(stmt->cond());
@@ -1019,7 +1098,6 @@
   }
   VisitIterationBody(stmt, &loop_builder);
   if (stmt->next() != nullptr) {
-    loop_builder.Next();
     builder()->SetStatementPosition(stmt->next());
     Visit(stmt->next());
   }
@@ -1135,9 +1213,8 @@
   builder()->StoreAccumulatorInRegister(index);
 
   // The loop
-  loop_builder.LoopHeader();
+  VisitIterationHeader(stmt, &loop_builder);
   builder()->SetExpressionAsStatementPosition(stmt->each());
-  loop_builder.Condition();
   builder()->ForInDone(index, cache_length);
   loop_builder.BreakIfTrue();
   DCHECK(Register::AreContiguous(cache_type, cache_array));
@@ -1146,7 +1223,6 @@
   loop_builder.ContinueIfUndefined();
   VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
   VisitIterationBody(stmt, &loop_builder);
-  loop_builder.Next();
   builder()->ForInStep(index);
   builder()->StoreAccumulatorInRegister(index);
   loop_builder.JumpToHeader();
@@ -1160,10 +1236,10 @@
   LoopBuilder loop_builder(builder());
   ControlScopeForIteration control_scope(this, stmt, &loop_builder);
 
+  builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
   VisitForEffect(stmt->assign_iterator());
 
-  loop_builder.LoopHeader();
-  loop_builder.Next();
+  VisitIterationHeader(stmt, &loop_builder);
   builder()->SetExpressionAsStatementPosition(stmt->next_result());
   VisitForEffect(stmt->next_result());
   VisitForAccumulatorValue(stmt->result_done());
@@ -1324,7 +1400,7 @@
       .StoreAccumulatorInRegister(prototype);
 
   VisitClassLiteralProperties(expr, literal, prototype);
-  builder()->CallRuntime(Runtime::kFinalizeClassDefinition, literal, 2);
+  builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
   // Assign to class variable.
   if (expr->class_variable_proxy() != nullptr) {
     Variable* var = expr->class_variable_proxy()->var();
@@ -1514,10 +1590,21 @@
 
 
 void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  // Deep-copy the literal boilerplate.
+  // Copy the literal boilerplate.
+  int fast_clone_properties_count = 0;
+  if (FastCloneShallowObjectStub::IsSupported(expr)) {
+    STATIC_ASSERT(
+        FastCloneShallowObjectStub::kMaximumClonedProperties <=
+        1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
+    fast_clone_properties_count =
+        FastCloneShallowObjectStub::PropertiesCount(expr->properties_count());
+  }
+  uint8_t flags =
+      CreateObjectLiteralFlags::FlagsBits::encode(expr->ComputeFlags()) |
+      CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
+          fast_clone_properties_count);
   builder()->CreateObjectLiteral(expr->constant_properties(),
-                                 expr->literal_index(),
-                                 expr->ComputeFlags(true));
+                                 expr->literal_index(), flags);
 
   // Allocate in the outer scope since this register is used to return the
   // expression's results to the caller.
@@ -1753,10 +1840,7 @@
 
 void BytecodeGenerator::BuildHoleCheckForVariableLoad(VariableMode mode,
                                                       Handle<String> name) {
-  if (mode == CONST_LEGACY) {
-    BytecodeLabel end_label;
-    builder()->JumpIfNotHole(&end_label).LoadUndefined().Bind(&end_label);
-  } else if (mode == LET || mode == CONST) {
+  if (mode == LET || mode == CONST) {
     BuildThrowIfHole(name);
   }
 }
@@ -1940,7 +2024,7 @@
   RegisterAllocationScope assignment_register_scope(this);
   BytecodeLabel end_label;
   bool hole_check_required =
-      (mode == CONST_LEGACY) || (mode == LET && op != Token::INIT) ||
+      (mode == LET && op != Token::INIT) ||
       (mode == CONST && op != Token::INIT) ||
       (mode == CONST && op == Token::INIT && variable->is_this());
   switch (variable->location()) {
@@ -1953,6 +2037,16 @@
         destination = Register(variable->index());
       }
 
+      if (mode == CONST_LEGACY && op != Token::INIT) {
+        if (is_strict(language_mode())) {
+          builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+                                 0);
+        }
+        // Non-initializing assignments to legacy constants are ignored
+        // in sloppy mode. Break here to avoid storing into the variable.
+        break;
+      }
+
       if (hole_check_required) {
         // Load destination to check for hole.
         Register value_temp = register_allocator()->NewRegister();
@@ -1960,28 +2054,9 @@
             ->StoreAccumulatorInRegister(value_temp)
             .LoadAccumulatorWithRegister(destination);
 
-        if (mode == CONST_LEGACY && op == Token::INIT) {
-          // Perform an intialization check for legacy constants.
-          builder()
-              ->JumpIfNotHole(&end_label)
-              .MoveRegister(value_temp, destination)
-              .Bind(&end_label)
-              .LoadAccumulatorWithRegister(value_temp);
-          // Break here because the value should not be stored unconditionally.
-          break;
-        } else if (mode == CONST_LEGACY && op != Token::INIT) {
-          DCHECK(!is_strict(language_mode()));
-          // Ensure accumulator is in the correct state.
-          builder()->LoadAccumulatorWithRegister(value_temp);
-          // Break here, non-initializing assignments to legacy constants are
-          // ignored.
-          break;
-        } else {
-          BuildHoleCheckForVariableAssignment(variable, op);
-          builder()->LoadAccumulatorWithRegister(value_temp);
-        }
+        BuildHoleCheckForVariableAssignment(variable, op);
+        builder()->LoadAccumulatorWithRegister(value_temp);
       }
-
       builder()->StoreAccumulatorInRegister(destination);
       break;
     }
@@ -2018,6 +2093,16 @@
         builder()->LoadAccumulatorWithRegister(value_temp);
       }
 
+      if (mode == CONST_LEGACY && op != Token::INIT) {
+        if (is_strict(language_mode())) {
+          builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+                                 0);
+        }
+        // Non-initializing assignments to legacy constants are ignored
+        // in sloppy mode. Break here to avoid storing into the variable.
+        break;
+      }
+
       if (hole_check_required) {
         // Load destination to check for hole.
         Register value_temp = register_allocator()->NewRegister();
@@ -2025,55 +2110,16 @@
             ->StoreAccumulatorInRegister(value_temp)
             .LoadContextSlot(context_reg, variable->index());
 
-        if (mode == CONST_LEGACY && op == Token::INIT) {
-          // Perform an intialization check for legacy constants.
-          builder()
-              ->JumpIfNotHole(&end_label)
-              .LoadAccumulatorWithRegister(value_temp)
-              .StoreContextSlot(context_reg, variable->index())
-              .Bind(&end_label);
-          builder()->LoadAccumulatorWithRegister(value_temp);
-          // Break here because the value should not be stored unconditionally.
-          // The above code performs the store conditionally.
-          break;
-        } else if (mode == CONST_LEGACY && op != Token::INIT) {
-          DCHECK(!is_strict(language_mode()));
-          // Ensure accumulator is in the correct state.
-          builder()->LoadAccumulatorWithRegister(value_temp);
-          // Break here, non-initializing assignments to legacy constants are
-          // ignored.
-          break;
-        } else {
-          BuildHoleCheckForVariableAssignment(variable, op);
-          builder()->LoadAccumulatorWithRegister(value_temp);
-        }
+        BuildHoleCheckForVariableAssignment(variable, op);
+        builder()->LoadAccumulatorWithRegister(value_temp);
       }
 
       builder()->StoreContextSlot(context_reg, variable->index());
       break;
     }
     case VariableLocation::LOOKUP: {
-      if (mode == CONST_LEGACY && op == Token::INIT) {
-        register_allocator()->PrepareForConsecutiveAllocations(3);
-        Register value = register_allocator()->NextConsecutiveRegister();
-        Register context = register_allocator()->NextConsecutiveRegister();
-        Register name = register_allocator()->NextConsecutiveRegister();
-
-        // InitializeLegacyConstLookupSlot runtime call returns the 'value'
-        // passed to it. So, accumulator will have its original contents when
-        // runtime call returns.
-        builder()
-            ->StoreAccumulatorInRegister(value)
-            .MoveRegister(execution_context()->reg(), context)
-            .LoadLiteral(variable->name())
-            .StoreAccumulatorInRegister(name)
-            .CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, value, 3);
-      } else if (mode == CONST_LEGACY && op != Token::INIT) {
-        // Non-intializing assignments to legacy constants are ignored.
-        DCHECK(!is_strict(language_mode()));
-      } else {
-        builder()->StoreLookupSlot(variable->name(), language_mode());
-      }
+      DCHECK_NE(CONST_LEGACY, variable->mode());
+      builder()->StoreLookupSlot(variable->name(), language_mode());
       break;
     }
   }
@@ -2224,16 +2270,86 @@
   execution_result()->SetResultInAccumulator();
 }
 
+void BytecodeGenerator::VisitYield(Yield* expr) {
+  builder()->SetExpressionPosition(expr);
+  Register value = VisitForRegisterValue(expr->expression());
 
-void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
+  Register generator = VisitForRegisterValue(expr->generator_object());
 
+  // Save context, registers, and state. Then return.
+  builder()
+      ->LoadLiteral(Smi::FromInt(expr->yield_id()))
+      .SuspendGenerator(generator)
+      .LoadAccumulatorWithRegister(value)
+      .Return();  // Hard return (ignore any finally blocks).
+
+  builder()->Bind(&(generator_resume_points_[expr->yield_id()]));
+  // Upon resume, we continue here.
+
+  {
+    RegisterAllocationScope register_scope(this);
+
+    // Update state to indicate that we have finished resuming. Loop headers
+    // rely on this.
+    builder()
+        ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+        .StoreAccumulatorInRegister(generator_state_);
+
+    Register input = register_allocator()->NewRegister();
+    builder()
+        ->CallRuntime(Runtime::kGeneratorGetInput, generator, 1)
+        .StoreAccumulatorInRegister(input);
+
+    Register resume_mode = register_allocator()->NewRegister();
+    builder()
+        ->CallRuntime(Runtime::kGeneratorGetResumeMode, generator, 1)
+        .StoreAccumulatorInRegister(resume_mode);
+
+    // Now dispatch on resume mode.
+
+    BytecodeLabel resume_with_next;
+    BytecodeLabel resume_with_return;
+    BytecodeLabel resume_with_throw;
+
+    builder()
+        ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
+        .CompareOperation(Token::EQ_STRICT, resume_mode)
+        .JumpIfTrue(&resume_with_next)
+        .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
+        .CompareOperation(Token::EQ_STRICT, resume_mode)
+        .JumpIfTrue(&resume_with_throw)
+        .Jump(&resume_with_return);
+
+    builder()->Bind(&resume_with_return);
+    {
+      register_allocator()->PrepareForConsecutiveAllocations(2);
+      Register value = register_allocator()->NextConsecutiveRegister();
+      Register done = register_allocator()->NextConsecutiveRegister();
+      builder()
+          ->MoveRegister(input, value)
+          .LoadTrue()
+          .StoreAccumulatorInRegister(done)
+          .CallRuntime(Runtime::kCreateIterResultObject, value, 2);
+      execution_control()->ReturnAccumulator();
+    }
+
+    builder()->Bind(&resume_with_throw);
+    builder()
+        ->LoadAccumulatorWithRegister(input)
+        .Throw();
+
+    builder()->Bind(&resume_with_next);
+    builder()->LoadAccumulatorWithRegister(input);
+  }
+  execution_result()->SetResultInAccumulator();
+}
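+
+// Resume protocol sketched above, for illustration (not part of the V8
+// sources): generator.next(v) resumes with kNext and leaves v in the
+// accumulator; generator.return(v) takes the resume_with_return path and
+// returns {value: v, done: true} via Runtime::kCreateIterResultObject;
+// generator.throw(e) resumes with kThrow and rethrows e at the yield.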
 
 void BytecodeGenerator::VisitThrow(Throw* expr) {
   VisitForAccumulatorValue(expr->exception());
   builder()->SetExpressionPosition(expr);
   builder()->Throw();
-  // Throw statments are modeled as expression instead of statments. These are
-  // converted from assignment statements in Rewriter::ReWrite pass. An
+  // Throw statements are modeled as expressions instead of statements. These
+  // are converted from assignment statements in Rewriter::ReWrite pass. An
   // assignment statement expects a value in the accumulator. This is a hack to
   // avoid DCHECK fails assert accumulator has been set.
   execution_result()->SetResultInAccumulator();
@@ -2452,12 +2568,14 @@
   // callee value.
   if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
     RegisterAllocationScope inner_register_scope(this);
-    register_allocator()->PrepareForConsecutiveAllocations(5);
+    register_allocator()->PrepareForConsecutiveAllocations(6);
     Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
     Register source = register_allocator()->NextConsecutiveRegister();
     Register function = register_allocator()->NextConsecutiveRegister();
     Register language = register_allocator()->NextConsecutiveRegister();
-    Register position = register_allocator()->NextConsecutiveRegister();
+    Register eval_scope_position =
+        register_allocator()->NextConsecutiveRegister();
+    Register eval_position = register_allocator()->NextConsecutiveRegister();
 
     // Set up arguments for ResolvePossiblyDirectEval by copying callee, source
     // strings and function closure, and loading language and
@@ -2470,11 +2588,13 @@
         .StoreAccumulatorInRegister(language)
         .LoadLiteral(
             Smi::FromInt(execution_context()->scope()->start_position()))
-        .StoreAccumulatorInRegister(position);
+        .StoreAccumulatorInRegister(eval_scope_position)
+        .LoadLiteral(Smi::FromInt(expr->position()))
+        .StoreAccumulatorInRegister(eval_position);
 
     // Call ResolvePossiblyDirectEval and modify the callee.
     builder()
-        ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 5)
+        ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 6)
         .StoreAccumulatorInRegister(callee);
   }
 
@@ -2739,13 +2859,12 @@
     }
   }
 
-  // Convert old value into a number.
-  builder()->CastAccumulatorToNumber();
-
   // Save result for postfix expressions.
   if (is_postfix) {
     old_value = register_allocator()->outer()->NewRegister();
-    builder()->StoreAccumulatorInRegister(old_value);
+
+    // Convert old value into a number before saving it.
+    builder()->CastAccumulatorToNumber().StoreAccumulatorInRegister(old_value);
   }
 
   // Perform +1/-1 operation.
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 4ef1738..0dcc9be 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -11,15 +11,18 @@
 
 namespace v8 {
 namespace internal {
+
+class CompilationInfo;
+
 namespace interpreter {
 
 class LoopBuilder;
 
 class BytecodeGenerator final : public AstVisitor {
  public:
-  BytecodeGenerator(Isolate* isolate, Zone* zone);
+  explicit BytecodeGenerator(CompilationInfo* info);
 
-  Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
+  Handle<BytecodeArray> MakeBytecode();
 
 #define DECLARE_VISIT(type) void Visit##type(type* node) override;
   AST_NODE_LIST(DECLARE_VISIT)
@@ -109,6 +112,13 @@
   void BuildHoleCheckForVariableLoad(VariableMode mode, Handle<String> name);
   void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
 
+  // Build jump to targets[value], where
+  // start_index <= value < start_index + size.
+  void BuildIndexedJump(Register value, size_t start_index, size_t size,
+                        ZoneVector<BytecodeLabel>& targets);
+
+  void VisitGeneratorPrologue();
+
   void VisitArgumentsObject(Variable* variable);
   void VisitRestArgumentsArray(Variable* rest);
   void VisitCallSuper(Call* call);
@@ -133,7 +143,9 @@
                                   Register value_out);
   void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
 
-  // Visit the body of a loop iteration.
+  // Visit the header/body of a loop iteration.
+  void VisitIterationHeader(IterationStatement* stmt,
+                            LoopBuilder* loop_builder);
   void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
 
   // Visit a statement and switch scopes, the context is in the accumulator.
@@ -159,16 +171,11 @@
   template <size_t N>
   void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
 
-  inline void set_builder(BytecodeArrayBuilder* builder) { builder_ = builder; }
   inline BytecodeArrayBuilder* builder() const { return builder_; }
-
   inline Isolate* isolate() const { return isolate_; }
   inline Zone* zone() const { return zone_; }
-
   inline Scope* scope() const { return scope_; }
-  inline void set_scope(Scope* scope) { scope_ = scope; }
   inline CompilationInfo* info() const { return info_; }
-  inline void set_info(CompilationInfo* info) { info_ = info; }
 
   inline ControlScope* execution_control() const { return execution_control_; }
   inline void set_execution_control(ControlScope* scope) {
@@ -204,6 +211,8 @@
   ContextScope* execution_context_;
   ExpressionResultScope* execution_result_;
   RegisterAllocationScope* register_allocator_;
+  ZoneVector<BytecodeLabel> generator_resume_points_;
+  Register generator_state_;
   int try_catch_nesting_level_;
   int try_finally_nesting_level_;
 };
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
new file mode 100644
index 0000000..803fc23
--- /dev/null
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -0,0 +1,178 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-peephole-optimizer.h"
+
+#include "src/interpreter/constant-array-builder.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
+    ConstantArrayBuilder* constant_array_builder,
+    BytecodePipelineStage* next_stage)
+    : constant_array_builder_(constant_array_builder),
+      next_stage_(next_stage),
+      last_is_discardable_(false) {
+  InvalidateLast();
+}
+
+void BytecodePeepholeOptimizer::InvalidateLast() {
+  last_.set_bytecode(Bytecode::kIllegal);
+}
+
+bool BytecodePeepholeOptimizer::LastIsValid() const {
+  return last_.bytecode() != Bytecode::kIllegal;
+}
+
+void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
+  last_.Clone(node);
+  last_is_discardable_ = true;
+}
+
+// override
+size_t BytecodePeepholeOptimizer::FlushForOffset() {
+  size_t buffered_size = next_stage_->FlushForOffset();
+  if (LastIsValid()) {
+    if (last_.bytecode() == Bytecode::kNop &&
+        !last_.source_info().is_statement()) {
+      // The Nop can be dropped as it doesn't have a statement
+      // position for the debugger and doesn't have any effects by
+      // definition.
+      InvalidateLast();
+    } else {
+      buffered_size += last_.Size();
+      last_is_discardable_ = false;
+    }
+  }
+  return buffered_size;
+}
+
+// override
+void BytecodePeepholeOptimizer::FlushBasicBlock() {
+  if (LastIsValid()) {
+    next_stage_->Write(&last_);
+    InvalidateLast();
+  }
+  next_stage_->FlushBasicBlock();
+}
+
+// override
+void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
+  // Attempt optimization if there is an earlier node to optimize with.
+  if (LastIsValid()) {
+    node = Optimize(node);
+    // Only output the last node if it wasn't invalidated by the optimization.
+    if (LastIsValid()) {
+      next_stage_->Write(&last_);
+      InvalidateLast();
+    }
+  }
+
+  if (node != nullptr) {
+    SetLast(node);
+  }
+}
+
+Handle<Object> BytecodePeepholeOptimizer::GetConstantForIndexOperand(
+    const BytecodeNode* const node, int index) const {
+  DCHECK_LT(index, node->operand_count());
+  DCHECK_EQ(Bytecodes::GetOperandType(node->bytecode(), index),
+            OperandType::kIdx);
+  uint32_t index_operand = node->operand(index);
+  return constant_array_builder_->At(index_operand);
+}
+
+bool BytecodePeepholeOptimizer::LastBytecodePutsNameInAccumulator() const {
+  DCHECK(LastIsValid());
+  return (last_.bytecode() == Bytecode::kTypeOf ||
+          last_.bytecode() == Bytecode::kToName ||
+          (last_.bytecode() == Bytecode::kLdaConstant &&
+           GetConstantForIndexOperand(&last_, 0)->IsName()));
+}
+
+void BytecodePeepholeOptimizer::UpdateCurrentBytecode(BytecodeNode* current) {
+  if (Bytecodes::IsJumpIfToBoolean(current->bytecode()) &&
+      Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
+    // Conditional jumps with boolean conditions are emitted in
+    // ToBoolean form by the bytecode array builder,
+    // i.e. JumpIfToBooleanTrue rather than JumpIfTrue. The ToBoolean element
+    // can be removed if the previous bytecode put a boolean value in
+    // the accumulator.
+    Bytecode jump = Bytecodes::GetJumpWithoutToBoolean(current->bytecode());
+    current->set_bytecode(jump, current->operand(0), current->operand_scale());
+  } else if (current->bytecode() == Bytecode::kToBooleanLogicalNot &&
+             Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
+    // Logical-nots are emitted in ToBoolean form by the bytecode array
+    // builder. The ToBoolean element can be removed if the previous bytecode
+    // put a boolean value in the accumulator.
+    current->set_bytecode(Bytecode::kLogicalNot);
+  }
+}
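+
+// Example, for illustration (not part of the V8 sources): after LdaTrue,
+// which writes a boolean to the accumulator, JumpIfToBooleanTrue is
+// rewritten to JumpIfTrue and ToBooleanLogicalNot to LogicalNot.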
+
+bool BytecodePeepholeOptimizer::CanElideCurrent(
+    const BytecodeNode* const current) const {
+  if (Bytecodes::IsLdarOrStar(last_.bytecode()) &&
+      Bytecodes::IsLdarOrStar(current->bytecode()) &&
+      current->operand(0) == last_.operand(0)) {
+    // Ldar and Star make the accumulator and register hold equivalent
+    // values. Only the first bytecode is needed if there's a sequence
+    // of back-to-back Ldar and Star bytecodes with the same operand.
+    return true;
+  } else if (current->bytecode() == Bytecode::kToName &&
+             LastBytecodePutsNameInAccumulator()) {
+    // If the previous bytecode ensured a name was in the accumulator,
+    // the type coercion ToName() can be elided.
+    return true;
+  } else {
+    // Additional candidates for eliding current:
+    // (i) ToNumber if the last puts a number in the accumulator.
+    return false;
+  }
+}
+
+bool BytecodePeepholeOptimizer::CanElideLast(
+    const BytecodeNode* const current) const {
+  if (!last_is_discardable_) {
+    return false;
+  }
+
+  if (last_.bytecode() == Bytecode::kNop) {
+    // Nops are placeholders for holding source position information
+    // and can be elided.
+    return true;
+  } else if (Bytecodes::IsAccumulatorLoadWithoutEffects(current->bytecode()) &&
+             Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
+    // The accumulator is invisible to the debugger. If there is a sequence of
+    // consecutive accumulator loads (that don't have side effects) then only
+    // the final load is potentially visible.
+    return true;
+  } else {
+    return false;
+  }
+}
+
+BytecodeNode* BytecodePeepholeOptimizer::Optimize(BytecodeNode* current) {
+  UpdateCurrentBytecode(current);
+
+  if (CanElideCurrent(current)) {
+    if (current->source_info().is_valid()) {
+      current->set_bytecode(Bytecode::kNop);
+    } else {
+      current = nullptr;
+    }
+  } else if (CanElideLast(current)) {
+    if (last_.source_info().is_valid()) {
+      current->source_info().Update(last_.source_info());
+    }
+    InvalidateLast();
+  }
+  return current;
+}
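+
+// Elision example, for illustration (not part of the V8 sources): in the
+// sequence Star r0; Ldar r0 the accumulator and r0 already hold the same
+// value, so the Ldar is elided; if the elided node carried source info, it
+// becomes a Nop so the position survives for the debugger.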
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-peephole-optimizer.h b/src/interpreter/bytecode-peephole-optimizer.h
new file mode 100644
index 0000000..1981395
--- /dev/null
+++ b/src/interpreter/bytecode-peephole-optimizer.h
@@ -0,0 +1,57 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ConstantArrayBuilder;
+
+// An optimization stage for performing peephole optimizations on
+// generated bytecode. The optimizer may buffer one bytecode
+// internally.
+class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
+                                        public ZoneObject {
+ public:
+  BytecodePeepholeOptimizer(ConstantArrayBuilder* constant_array_builder,
+                            BytecodePipelineStage* next_stage);
+
+  void Write(BytecodeNode* node) override;
+  size_t FlushForOffset() override;
+  void FlushBasicBlock() override;
+
+ private:
+  BytecodeNode* Optimize(BytecodeNode* current);
+
+  void UpdateCurrentBytecode(BytecodeNode* const current);
+  bool CanElideCurrent(const BytecodeNode* const current) const;
+  bool CanElideLast(const BytecodeNode* const current) const;
+
+  void InvalidateLast();
+  bool LastIsValid() const;
+  void SetLast(const BytecodeNode* const node);
+
+  bool LastBytecodePutsNameInAccumulator() const;
+
+  Handle<Object> GetConstantForIndexOperand(const BytecodeNode* const node,
+                                            int index) const;
+
+  ConstantArrayBuilder* constant_array_builder_;
+  BytecodePipelineStage* next_stage_;
+  BytecodeNode last_;
+  bool last_is_discardable_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodePeepholeOptimizer);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
diff --git a/src/interpreter/bytecode-pipeline.cc b/src/interpreter/bytecode-pipeline.cc
new file mode 100644
index 0000000..7bfb815
--- /dev/null
+++ b/src/interpreter/bytecode-pipeline.cc
@@ -0,0 +1,162 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+#include <iomanip>
+#include "src/interpreter/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+void BytecodeSourceInfo::Update(const BytecodeSourceInfo& entry) {
+  DCHECK(entry.is_valid());
+  if (!is_valid() || (entry.is_statement() && !is_statement()) ||
+      (entry.is_statement() && is_statement() &&
+       entry.source_position() > source_position())) {
+    // Position is updated if any of the following conditions are met:
+    //   (1) there is no existing position.
+    //   (2) the incoming position is a statement and the current position
+    //       is an expression.
+    //   (3) the existing position is a statement and the incoming
+    //       statement has a later source position.
+    // Condition 3 is needed for the first statement in a function which
+    // may end up with later statement positions being added during bytecode
+    // generation.
+    source_position_ = entry.source_position_;
+    is_statement_ = entry.is_statement_;
+  }
+}
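+
+// Update examples, for illustration (not part of the V8 sources): an
+// expression position {10, E} updated with statement {10, S} becomes a
+// statement; statement {10, S} updated with the later statement {20, S}
+// becomes {20, S}; updating any valid position with an expression is a
+// no-op.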
+
+BytecodeNode::BytecodeNode(Bytecode bytecode) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+  bytecode_ = bytecode;
+  operand_scale_ = OperandScale::kSingle;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+                           OperandScale operand_scale) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+  bytecode_ = bytecode;
+  operands_[0] = operand0;
+  operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+                           uint32_t operand1, OperandScale operand_scale) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
+  bytecode_ = bytecode;
+  operands_[0] = operand0;
+  operands_[1] = operand1;
+  operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+                           uint32_t operand1, uint32_t operand2,
+                           OperandScale operand_scale) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
+  bytecode_ = bytecode;
+  operands_[0] = operand0;
+  operands_[1] = operand1;
+  operands_[2] = operand2;
+  operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+                           uint32_t operand1, uint32_t operand2,
+                           uint32_t operand3, OperandScale operand_scale) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 4);
+  bytecode_ = bytecode;
+  operands_[0] = operand0;
+  operands_[1] = operand1;
+  operands_[2] = operand2;
+  operands_[3] = operand3;
+  operand_scale_ = operand_scale;
+}
+
+void BytecodeNode::set_bytecode(Bytecode bytecode) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+  bytecode_ = bytecode;
+  operand_scale_ = OperandScale::kSingle;
+}
+
+void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0,
+                                OperandScale operand_scale) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+  bytecode_ = bytecode;
+  operands_[0] = operand0;
+  operand_scale_ = operand_scale;
+}
+
+size_t BytecodeNode::Size() const {
+  size_t size = Bytecodes::Size(bytecode_, operand_scale_);
+  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
+    size += 1;
+  }
+  return size;
+}
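+
+// Size example, for illustration (not part of the V8 sources): a node with
+// one scalable operand at OperandScale::kDouble occupies 1 (bytecode byte) +
+// 2 (operand bytes) = 3 bytes, plus 1 for the kWide prefix, so Size()
+// returns 4.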
+
+void BytecodeNode::Print(std::ostream& os) const {
+#ifdef DEBUG
+  std::ios saved_state(nullptr);
+  saved_state.copyfmt(os);
+
+  os << Bytecodes::ToString(bytecode_);
+  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
+    Bytecode scale_prefix =
+        Bytecodes::OperandScaleToPrefixBytecode(operand_scale_);
+    os << '.' << Bytecodes::ToString(scale_prefix);
+  }
+
+  for (int i = 0; i < operand_count(); ++i) {
+    os << ' ' << std::setw(8) << std::setfill('0') << std::hex << operands_[i];
+  }
+  os.copyfmt(saved_state);
+
+  if (source_info_.is_valid()) {
+    os << source_info_;
+  }
+  os << '\n';
+#else
+  os << static_cast<const void*>(this);
+#endif  // DEBUG
+}
+
+void BytecodeNode::Clone(const BytecodeNode* const other) {
+  memcpy(this, other, sizeof(*other));
+}
+
+bool BytecodeNode::operator==(const BytecodeNode& other) const {
+  if (this == &other) {
+    return true;
+  } else if (this->bytecode() != other.bytecode() ||
+             this->source_info() != other.source_info()) {
+    return false;
+  } else {
+    for (int i = 0; i < this->operand_count(); ++i) {
+      if (this->operand(i) != other.operand(i)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
+  node.Print(os);
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
+  if (info.is_valid()) {
+    char description = info.is_statement() ? 'S' : 'E';
+    os << info.source_position() << ' ' << description << '>';
+  }
+  return os;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
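
An aside on BytecodeNode::Size() above: a node's serialized footprint is the bytecode's size at the chosen operand scale, plus one byte when that scale requires a Wide/ExtraWide prefix. A minimal standalone sketch of the arithmetic, assuming for illustration that every operand is a scalable byte at single scale (the simplified enum is a stand-in, not the real V8 definition):

#include <cstddef>
#include <cstdint>

// Simplified stand-in for interpreter::OperandScale.
enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

// Scales other than kSingle are encoded via a Wide/ExtraWide prefix bytecode.
bool RequiresPrefix(OperandScale scale) {
  return scale != OperandScale::kSingle;
}

// One opcode byte, plus each operand widened by the scale, plus the
// prefix byte when one is needed.
size_t SerializedSize(int operand_count, OperandScale scale) {
  size_t size = 1 + operand_count * static_cast<size_t>(scale);
  if (RequiresPrefix(scale)) size += 1;
  return size;
}
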
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
new file mode 100644
index 0000000..ade712c
--- /dev/null
+++ b/src/interpreter/bytecode-pipeline.h
@@ -0,0 +1,138 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
+
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeNode;
+class BytecodeSourceInfo;
+
+// Interface for bytecode pipeline stages.
+class BytecodePipelineStage {
+ public:
+  virtual ~BytecodePipelineStage() {}
+
+  // Writes bytecode node |node| into the pipeline. The node is only valid
+  // for the duration of the call. Callees should clone it if
+  // deferring Write() to the next stage.
+  virtual void Write(BytecodeNode* node) = 0;
+
+  // Flushes state for bytecode array offset calculation. Returns the
+  // current size of the bytecode array.
+  virtual size_t FlushForOffset() = 0;
+
+  // Flush state to terminate basic block.
+  virtual void FlushBasicBlock() = 0;
+};
+
+// Source code position information.
+class BytecodeSourceInfo final {
+ public:
+  static const int kUninitializedPosition = -1;
+
+  BytecodeSourceInfo(int position = kUninitializedPosition,
+                     bool is_statement = false)
+      : source_position_(position), is_statement_(is_statement) {}
+
+  // Combine later source info with current.
+  void Update(const BytecodeSourceInfo& entry);
+
+  int source_position() const {
+    DCHECK(is_valid());
+    return source_position_;
+  }
+
+  bool is_statement() const { return is_valid() && is_statement_; }
+
+  bool is_valid() const { return source_position_ != kUninitializedPosition; }
+  void set_invalid() { source_position_ = kUninitializedPosition; }
+
+  bool operator==(const BytecodeSourceInfo& other) const {
+    return source_position_ == other.source_position_ &&
+           is_statement_ == other.is_statement_;
+  }
+  bool operator!=(const BytecodeSourceInfo& other) const {
+    return source_position_ != other.source_position_ ||
+           is_statement_ != other.is_statement_;
+  }
+
+ private:
+  int source_position_;
+  bool is_statement_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeSourceInfo);
+};
+
+// A container for a generated bytecode, its operands, and source information.
+// These must be allocated by a BytecodeNodeAllocator instance.
+class BytecodeNode final : ZoneObject {
+ public:
+  explicit BytecodeNode(Bytecode bytecode = Bytecode::kIllegal);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0,
+               OperandScale operand_scale);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+               OperandScale operand_scale);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+               uint32_t operand2, OperandScale operand_scale);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+               uint32_t operand2, uint32_t operand3,
+               OperandScale operand_scale);
+
+  void set_bytecode(Bytecode bytecode);
+  void set_bytecode(Bytecode bytecode, uint32_t operand0,
+                    OperandScale operand_scale);
+
+  // Clone |other|.
+  void Clone(const BytecodeNode* const other);
+
+  // Print to stream |os|.
+  void Print(std::ostream& os) const;
+
+  // Return the size when this node is serialized to a bytecode array.
+  size_t Size() const;
+
+  Bytecode bytecode() const { return bytecode_; }
+
+  uint32_t operand(int i) const {
+    DCHECK_LT(i, operand_count());
+    return operands_[i];
+  }
+  uint32_t* operands() { return operands_; }
+  const uint32_t* operands() const { return operands_; }
+
+  int operand_count() const { return Bytecodes::NumberOfOperands(bytecode_); }
+  OperandScale operand_scale() const { return operand_scale_; }
+
+  const BytecodeSourceInfo& source_info() const { return source_info_; }
+  BytecodeSourceInfo& source_info() { return source_info_; }
+
+  bool operator==(const BytecodeNode& other) const;
+  bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
+
+ private:
+  static const int kInvalidPosition = kMinInt;
+  static const size_t kMaxOperands = 4;
+
+  Bytecode bytecode_;
+  uint32_t operands_[kMaxOperands];
+  OperandScale operand_scale_;
+  BytecodeSourceInfo source_info_;
+};
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info);
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node);
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_PIPELINE_H_
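
As a usage sketch of the interface above, a stage that merely forwards nodes downstream could look like the following. ForwardingStage is a hypothetical name, not part of this change; it assumes only the declarations in bytecode-pipeline.h:

#include "src/interpreter/bytecode-pipeline.h"

// Hypothetical pass-through stage; it illustrates the ownership rule
// documented on Write(): the node is only valid for the duration of the
// call, so a stage that deferred the write would copy the node first.
class ForwardingStage final : public BytecodePipelineStage {
 public:
  explicit ForwardingStage(BytecodePipelineStage* next) : next_(next) {}

  void Write(BytecodeNode* node) override { next_->Write(node); }
  size_t FlushForOffset() override { return next_->FlushForOffset(); }
  void FlushBasicBlock() override { next_->FlushBasicBlock(); }

 private:
  BytecodePipelineStage* next_;
};
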
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index 696a3b1..a4f6845 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -65,7 +65,7 @@
   DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
 };
 
-// A class than allows the instantiator to allocate temporary registers that are
+// A class that allows the instantiator to allocate temporary registers that are
 // cleaned up when scope is closed.
 class BytecodeRegisterAllocator final {
  public:
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index c724827..e7d1432 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -41,6 +41,28 @@
 OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
 #undef DECLARE_OPERAND_TYPE_TRAITS
 
+template <OperandType operand_type, OperandScale operand_scale>
+struct OperandScaler {
+  template <bool, OperandSize, OperandScale>
+  struct Helper {
+    static const int kSize = 0;
+  };
+  template <OperandSize size, OperandScale scale>
+  struct Helper<false, size, scale> {
+    static const int kSize = static_cast<int>(size);
+  };
+  template <OperandSize size, OperandScale scale>
+  struct Helper<true, size, scale> {
+    static const int kSize = static_cast<int>(size) * static_cast<int>(scale);
+  };
+
+  static const int kSize =
+      Helper<OperandTraits<operand_type>::TypeInfo::kIsScalable,
+             OperandTraits<operand_type>::TypeInfo::kUnscaledSize,
+             operand_scale>::kSize;
+  static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
+};
+
 template <OperandType>
 struct RegisterOperandTraits {
   static const int kIsRegisterOperand = 0;
@@ -61,11 +83,30 @@
           OperandType operand_1, OperandType operand_2, OperandType operand_3>
 struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
                       operand_3> {
-  static OperandType GetOperandType(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandType kOperands[] = {operand_0, operand_1, operand_2,
-                                     operand_3};
-    return kOperands[i];
+  static const OperandType* GetOperandTypes() {
+    static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
+                                                operand_3, OperandType::kNone};
+    return operand_types;
+  }
+
+  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+    switch (operand_scale) {
+#define CASE(Name, _)                                                  \
+  case OperandScale::k##Name: {                                        \
+    static const OperandSize kOperandSizes[] = {                       \
+        OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+        OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
+        OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
+        OperandScaler<operand_3, OperandScale::k##Name>::kOperandSize, \
+    };                                                                 \
+    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
+    return kOperandSizes[i];                                           \
+  }
+      OPERAND_SCALE_LIST(CASE)
+#undef CASE
+    }
+    UNREACHABLE();
+    return OperandSize::kNone;
   }
 
   template <OperandType ot>
@@ -98,10 +139,29 @@
 template <AccumulatorUse accumulator_use, OperandType operand_0,
           OperandType operand_1, OperandType operand_2>
 struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
-  static inline OperandType GetOperandType(int i) {
-    DCHECK(0 <= i && i <= 2);
-    const OperandType kOperands[] = {operand_0, operand_1, operand_2};
-    return kOperands[i];
+  static const OperandType* GetOperandTypes() {
+    static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
+                                                OperandType::kNone};
+    return operand_types;
+  }
+
+  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+    switch (operand_scale) {
+#define CASE(Name, _)                                                  \
+  case OperandScale::k##Name: {                                        \
+    static const OperandSize kOperandSizes[] = {                       \
+        OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+        OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
+        OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
+    };                                                                 \
+    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
+    return kOperandSizes[i];                                           \
+  }
+      OPERAND_SCALE_LIST(CASE)
+#undef CASE
+    }
+    UNREACHABLE();
+    return OperandSize::kNone;
   }
 
   template <OperandType ot>
@@ -130,10 +190,28 @@
 template <AccumulatorUse accumulator_use, OperandType operand_0,
           OperandType operand_1>
 struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
-  static inline OperandType GetOperandType(int i) {
-    DCHECK(0 <= i && i < kOperandCount);
-    const OperandType kOperands[] = {operand_0, operand_1};
-    return kOperands[i];
+  static const OperandType* GetOperandTypes() {
+    static const OperandType operand_types[] = {operand_0, operand_1,
+                                                OperandType::kNone};
+    return operand_types;
+  }
+
+  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+    switch (operand_scale) {
+#define CASE(Name, _)                                                  \
+  case OperandScale::k##Name: {                                        \
+    static const OperandSize kOperandSizes[] = {                       \
+        OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+        OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
+    };                                                                 \
+    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
+    return kOperandSizes[i];                                           \
+  }
+      OPERAND_SCALE_LIST(CASE)
+#undef CASE
+    }
+    UNREACHABLE();
+    return OperandSize::kNone;
   }
 
   template <OperandType ot>
@@ -158,9 +236,26 @@
 
 template <AccumulatorUse accumulator_use, OperandType operand_0>
 struct BytecodeTraits<accumulator_use, operand_0> {
-  static inline OperandType GetOperandType(int i) {
-    DCHECK(i == 0);
-    return operand_0;
+  static const OperandType* GetOperandTypes() {
+    static const OperandType operand_types[] = {operand_0, OperandType::kNone};
+    return operand_types;
+  }
+
+  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+    switch (operand_scale) {
+#define CASE(Name, _)                                                  \
+  case OperandScale::k##Name: {                                        \
+    static const OperandSize kOperandSizes[] = {                       \
+        OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+    };                                                                 \
+    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
+    return kOperandSizes[i];                                           \
+  }
+      OPERAND_SCALE_LIST(CASE)
+#undef CASE
+    }
+    UNREACHABLE();
+    return OperandSize::kNone;
   }
 
   template <OperandType ot>
@@ -182,9 +277,14 @@
 
 template <AccumulatorUse accumulator_use>
 struct BytecodeTraits<accumulator_use> {
-  static inline OperandType GetOperandType(int i) {
+  static const OperandType* GetOperandTypes() {
+    static const OperandType operand_types[] = {OperandType::kNone};
+    return operand_types;
+  }
+
+  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
     UNREACHABLE();
-    return OperandType::kNone;
+    return OperandSize::kNone;
   }
 
   template <OperandType ot>
@@ -200,37 +300,22 @@
   static const int kRegisterOperandBitmap = 0;
 };
 
-template <bool>
-struct OperandScaler {
-  static int Multiply(int size, int operand_scale) { return 0; }
-};
-
-template <>
-struct OperandScaler<false> {
-  static int Multiply(int size, int operand_scale) { return size; }
-};
-
-template <>
-struct OperandScaler<true> {
-  static int Multiply(int size, int operand_scale) {
-    return size * operand_scale;
-  }
-};
-
 static OperandSize ScaledOperandSize(OperandType operand_type,
                                      OperandScale operand_scale) {
+  STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+                OperandScale::kLast == OperandScale::kQuadruple);
+  int index = static_cast<int>(operand_scale) >> 1;
   switch (operand_type) {
-#define CASE(Name, TypeInfo)                                                   \
-  case OperandType::k##Name: {                                                 \
-    OperandSize base_size = OperandTypeInfoTraits<TypeInfo>::kUnscaledSize;    \
-    int size =                                                                 \
-        OperandScaler<OperandTypeInfoTraits<TypeInfo>::kIsScalable>::Multiply( \
-            static_cast<int>(base_size), static_cast<int>(operand_scale));     \
-    OperandSize operand_size = static_cast<OperandSize>(size);                 \
-    DCHECK(operand_size == OperandSize::kByte ||                               \
-           operand_size == OperandSize::kShort ||                              \
-           operand_size == OperandSize::kQuad);                                \
-    return operand_size;                                                       \
+#define CASE(Name, TypeInfo)                                    \
+  case OperandType::k##Name: {                                  \
+    static const OperandSize kOperandSizes[] = {                \
+        OperandScaler<OperandType::k##Name,                     \
+                      OperandScale::kSingle>::kOperandSize,     \
+        OperandScaler<OperandType::k##Name,                     \
+                      OperandScale::kDouble>::kOperandSize,     \
+        OperandScaler<OperandType::k##Name,                     \
+                      OperandScale::kQuadruple>::kOperandSize}; \
+    return kOperandSizes[index];                                \
   }
     OPERAND_TYPE_LIST(CASE)
 #undef CASE
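
The new OperandScaler in bytecode-traits.h folds the size computation into the type system, so the per-scale size tables above are built entirely at compile time. A simplified, self-contained analogue (the enums here are assumptions mirroring the real ones):

#include <cstdint>

enum class OperandSize : uint8_t { kNone = 0, kByte = 1, kShort = 2, kQuad = 4 };
enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

// Simplified analogue of OperandScaler: a compile-time multiply that only
// applies when the operand is scalable.
template <bool kIsScalable, OperandSize kUnscaledSize, OperandScale kScale>
struct SimpleScaler {
  static const int kSize =
      kIsScalable ? static_cast<int>(kUnscaledSize) * static_cast<int>(kScale)
                  : static_cast<int>(kUnscaledSize);
  static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
};

// A scalable byte operand widens to four bytes at quadruple scale; a
// fixed-width operand ignores the scale.
static_assert(SimpleScaler<true, OperandSize::kByte,
                           OperandScale::kQuadruple>::kOperandSize ==
                  OperandSize::kQuad,
              "scalable operands grow with the scale");
static_assert(SimpleScaler<false, OperandSize::kShort,
                           OperandScale::kQuadruple>::kOperandSize ==
                  OperandSize::kShort,
              "fixed operands ignore the scale");
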
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index fd27f39..5a67847 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -74,15 +74,13 @@
 // static
 const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
   switch (operand_scale) {
-    case OperandScale::kSingle:
-      return "Single";
-    case OperandScale::kDouble:
-      return "Double";
-    case OperandScale::kQuadruple:
-      return "Quadruple";
-    case OperandScale::kInvalid:
-      UNREACHABLE();
+#define CASE(Name, _)         \
+  case OperandScale::k##Name: \
+    return #Name;
+    OPERAND_SCALE_LIST(CASE)
+#undef CASE
   }
+  UNREACHABLE();
   return "";
 }
 
@@ -152,6 +150,11 @@
 
 
 // static
+size_t Bytecodes::ReturnCount(Bytecode bytecode) {
+  return bytecode == Bytecode::kReturn ? 1 : 0;
+}
+
+// static
 int Bytecodes::NumberOfOperands(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
@@ -241,24 +244,81 @@
 }
 
 // static
+bool Bytecodes::WritesBooleanToAccumulator(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kLdaTrue:
+    case Bytecode::kLdaFalse:
+    case Bytecode::kToBooleanLogicalNot:
+    case Bytecode::kLogicalNot:
+    case Bytecode::kTestEqual:
+    case Bytecode::kTestNotEqual:
+    case Bytecode::kTestEqualStrict:
+    case Bytecode::kTestLessThan:
+    case Bytecode::kTestLessThanOrEqual:
+    case Bytecode::kTestGreaterThan:
+    case Bytecode::kTestGreaterThanOrEqual:
+    case Bytecode::kTestInstanceOf:
+    case Bytecode::kTestIn:
+    case Bytecode::kForInDone:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// static
+bool Bytecodes::IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kLdaZero:
+    case Bytecode::kLdaSmi:
+    case Bytecode::kLdaUndefined:
+    case Bytecode::kLdaNull:
+    case Bytecode::kLdaTheHole:
+    case Bytecode::kLdaTrue:
+    case Bytecode::kLdaFalse:
+    case Bytecode::kLdaConstant:
+    case Bytecode::kLdar:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// static
 OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
+  DCHECK_LE(bytecode, Bytecode::kLast);
+  DCHECK_LT(i, NumberOfOperands(bytecode));
+  DCHECK_GE(i, 0);
+  return GetOperandTypes(bytecode)[i];
+}
+
+// static
+const OperandType* Bytecodes::GetOperandTypes(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
 #define CASE(Name, ...)   \
   case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__>::GetOperandType(i);
+    return BytecodeTraits<__VA_ARGS__>::GetOperandTypes();
     BYTECODE_LIST(CASE)
 #undef CASE
   }
   UNREACHABLE();
-  return OperandType::kNone;
+  return nullptr;
 }
 
 // static
 OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
                                       OperandScale operand_scale) {
-  OperandType op_type = GetOperandType(bytecode, i);
-  return ScaledOperandSize(op_type, operand_scale);
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__>::GetOperandSize(i, operand_scale);
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return OperandSize::kNone;
 }
 
 // static
@@ -279,6 +339,7 @@
 // static
 int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
                                 OperandScale operand_scale) {
+  DCHECK_LT(i, Bytecodes::NumberOfOperands(bytecode));
   // TODO(oth): restore this to a statically determined constant.
   int offset = 1;
   for (int operand_index = 0; operand_index < i; ++operand_index) {
@@ -343,6 +404,31 @@
   return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
 }
 
+// static
+bool Bytecodes::IsJumpIfToBoolean(Bytecode bytecode) {
+  return bytecode == Bytecode::kJumpIfToBooleanTrue ||
+         bytecode == Bytecode::kJumpIfToBooleanFalse ||
+         bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+         bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+}
+
+// static
+Bytecode Bytecodes::GetJumpWithoutToBoolean(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kJumpIfToBooleanTrue:
+      return Bytecode::kJumpIfTrue;
+    case Bytecode::kJumpIfToBooleanFalse:
+      return Bytecode::kJumpIfFalse;
+    case Bytecode::kJumpIfToBooleanTrueConstant:
+      return Bytecode::kJumpIfTrueConstant;
+    case Bytecode::kJumpIfToBooleanFalseConstant:
+      return Bytecode::kJumpIfFalseConstant;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return Bytecode::kIllegal;
+}
 
 // static
 bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
@@ -371,6 +457,11 @@
 }
 
 // static
+bool Bytecodes::IsLdarOrStar(Bytecode bytecode) {
+  return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
+}
+
+// static
 bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
   switch (bytecode) {
 #define CASE(Name, ...)                              \
@@ -461,6 +552,24 @@
 }
 
 // static
+int Bytecodes::GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
+  switch (operand_type) {
+    case OperandType::kMaybeReg:
+    case OperandType::kReg:
+    case OperandType::kRegOut:
+      return 1;
+    case OperandType::kRegPair:
+    case OperandType::kRegOutPair:
+      return 2;
+    case OperandType::kRegOutTriple:
+      return 3;
+    default:
+      UNREACHABLE();
+  }
+  return 0;
+}
+
+// static
 bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
   switch (operand_type) {
 #define CASE(Name, _)        \
@@ -474,10 +583,62 @@
 }
 
 // static
-OperandScale Bytecodes::NextOperandScale(OperandScale operand_scale) {
-  DCHECK(operand_scale >= OperandScale::kSingle &&
-         operand_scale <= OperandScale::kMaxValid);
-  return static_cast<OperandScale>(2 * static_cast<int>(operand_scale));
+OperandSize Bytecodes::SizeForSignedOperand(int value) {
+  if (kMinInt8 <= value && value <= kMaxInt8) {
+    return OperandSize::kByte;
+  } else if (kMinInt16 <= value && value <= kMaxInt16) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
+}
+
+// static
+OperandSize Bytecodes::SizeForUnsignedOperand(int value) {
+  DCHECK_GE(value, 0);
+  if (value <= kMaxUInt8) {
+    return OperandSize::kByte;
+  } else if (value <= kMaxUInt16) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
+}
+
+OperandSize Bytecodes::SizeForUnsignedOperand(size_t value) {
+  if (value <= static_cast<size_t>(kMaxUInt8)) {
+    return OperandSize::kByte;
+  } else if (value <= static_cast<size_t>(kMaxUInt16)) {
+    return OperandSize::kShort;
+  } else if (value <= kMaxUInt32) {
+    return OperandSize::kQuad;
+  } else {
+    UNREACHABLE();
+    return OperandSize::kQuad;
+  }
+}
+
+OperandScale Bytecodes::OperandSizesToScale(OperandSize size0,
+                                            OperandSize size1,
+                                            OperandSize size2,
+                                            OperandSize size3) {
+  OperandSize upper = std::max(size0, size1);
+  OperandSize lower = std::max(size2, size3);
+  OperandSize result = std::max(upper, lower);
+  // Operand sizes have been scaled before calling this function.
+  // Currently all scalable operands are byte sized at
+  // OperandScale::kSingle.
+  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+                    static_cast<int>(OperandScale::kSingle) &&
+                static_cast<int>(OperandSize::kShort) ==
+                    static_cast<int>(OperandScale::kDouble) &&
+                static_cast<int>(OperandSize::kQuad) ==
+                    static_cast<int>(OperandScale::kQuadruple));
+  OperandScale operand_scale = static_cast<OperandScale>(result);
+  DCHECK(operand_scale == OperandScale::kSingle ||
+         operand_scale == OperandScale::kDouble ||
+         operand_scale == OperandScale::kQuadruple);
+  return operand_scale;
 }
 
 // static
@@ -645,21 +806,29 @@
 }
 
 static const int kLastParamRegisterIndex =
-    -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     InterpreterFrameConstants::kLastParamFromFp) /
+    kPointerSize;
 static const int kFunctionClosureRegisterIndex =
-    -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     StandardFrameConstants::kFunctionOffset) /
+    kPointerSize;
 static const int kCurrentContextRegisterIndex =
-    -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     StandardFrameConstants::kContextOffset) /
+    kPointerSize;
 static const int kNewTargetRegisterIndex =
-    -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
-
-bool Register::is_byte_operand() const {
-  return index_ >= -kMaxInt8 && index_ <= -kMinInt8;
-}
-
-bool Register::is_short_operand() const {
-  return index_ >= -kMaxInt16 && index_ <= -kMinInt16;
-}
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     InterpreterFrameConstants::kNewTargetFromFp) /
+    kPointerSize;
+static const int kBytecodeArrayRegisterIndex =
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     InterpreterFrameConstants::kBytecodeArrayFromFp) /
+    kPointerSize;
+static const int kBytecodeOffsetRegisterIndex =
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     InterpreterFrameConstants::kBytecodeOffsetFromFp) /
+    kPointerSize;
 
 Register Register::FromParameterIndex(int index, int parameter_count) {
   DCHECK_GE(index, 0);
@@ -669,40 +838,60 @@
   return Register(register_index);
 }
 
-
 int Register::ToParameterIndex(int parameter_count) const {
   DCHECK(is_parameter());
   return index() - kLastParamRegisterIndex + parameter_count - 1;
 }
 
-
 Register Register::function_closure() {
   return Register(kFunctionClosureRegisterIndex);
 }
 
-
 bool Register::is_function_closure() const {
   return index() == kFunctionClosureRegisterIndex;
 }
 
-
 Register Register::current_context() {
   return Register(kCurrentContextRegisterIndex);
 }
 
-
 bool Register::is_current_context() const {
   return index() == kCurrentContextRegisterIndex;
 }
 
-
 Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
 
-
 bool Register::is_new_target() const {
   return index() == kNewTargetRegisterIndex;
 }
 
+Register Register::bytecode_array() {
+  return Register(kBytecodeArrayRegisterIndex);
+}
+
+bool Register::is_bytecode_array() const {
+  return index() == kBytecodeArrayRegisterIndex;
+}
+
+Register Register::bytecode_offset() {
+  return Register(kBytecodeOffsetRegisterIndex);
+}
+
+bool Register::is_bytecode_offset() const {
+  return index() == kBytecodeOffsetRegisterIndex;
+}
+
+OperandSize Register::SizeOfOperand() const {
+  int32_t operand = ToOperand();
+  if (operand >= kMinInt8 && operand <= kMaxInt8) {
+    return OperandSize::kByte;
+  } else if (operand >= kMinInt16 && operand <= kMaxInt16) {
+    return OperandSize::kShort;
+  } else {
+    return OperandSize::kQuad;
+  }
+}
+
 bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
                              Register reg4, Register reg5) {
   if (reg1.index() + 1 != reg2.index()) {
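
SizeForSignedOperand and Register::SizeOfOperand above both reduce to the same range test: pick the narrowest of byte/short/quad that can hold the value. A standalone equivalent using <limits> (illustrative only; the real code uses V8's kMinInt8/kMaxInt8 family):

#include <cstdint>
#include <limits>

enum class OperandSize : uint8_t { kByte = 1, kShort = 2, kQuad = 4 };

// Narrowest signed encoding that can represent |value|.
OperandSize SizeForSigned(int32_t value) {
  if (value >= std::numeric_limits<int8_t>::min() &&
      value <= std::numeric_limits<int8_t>::max()) {
    return OperandSize::kByte;
  }
  if (value >= std::numeric_limits<int16_t>::min() &&
      value <= std::numeric_limits<int16_t>::max()) {
    return OperandSize::kShort;
  }
  return OperandSize::kQuad;
}
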
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index 2361271..d67a390 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -9,6 +9,7 @@
 
 // Clients of this interface shouldn't depend on lots of interpreter internals.
 // Do not include anything from src/interpreter here!
+#include "src/frames.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -97,7 +98,7 @@
     OperandType::kIdx)                                                        \
                                                                               \
   /* Context operations */                                                    \
-  V(PushContext, AccumulatorUse::kRead, OperandType::kReg)                    \
+  V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut)                 \
   V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                     \
   V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                \
     OperandType::kIdx)                                                        \
@@ -149,6 +150,7 @@
   /* Unary Operators */                                                       \
   V(Inc, AccumulatorUse::kReadWrite)                                          \
   V(Dec, AccumulatorUse::kReadWrite)                                          \
+  V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite)                          \
   V(LogicalNot, AccumulatorUse::kReadWrite)                                   \
   V(TypeOf, AccumulatorUse::kReadWrite)                                       \
   V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)      \
@@ -238,14 +240,22 @@
   /* Non-local flow control */                                                \
   V(Throw, AccumulatorUse::kRead)                                             \
   V(ReThrow, AccumulatorUse::kRead)                                           \
-  V(Return, AccumulatorUse::kNone)                                            \
+  V(Return, AccumulatorUse::kRead)                                            \
+                                                                              \
+  /* Generators */                                                            \
+  V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg)               \
+  V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg)               \
                                                                               \
   /* Debugger */                                                              \
   V(Debugger, AccumulatorUse::kNone)                                          \
   DEBUG_BREAK_BYTECODE_LIST(V)                                                \
                                                                               \
   /* Illegal bytecode (terminates execution) */                               \
-  V(Illegal, AccumulatorUse::kNone)
+  V(Illegal, AccumulatorUse::kNone)                                           \
+                                                                              \
+  /* No operation (used to maintain source positions for peephole */          \
+  /* eliminated bytecodes). */                                                \
+  V(Nop, AccumulatorUse::kNone)
 
 enum class AccumulatorUse : uint8_t {
   kNone = 0,
@@ -266,12 +276,16 @@
 
 // Enumeration of scaling factors applicable to scalable operands. Code
 // relies on being able to cast values to integer scaling values.
+#define OPERAND_SCALE_LIST(V) \
+  V(Single, 1)                \
+  V(Double, 2)                \
+  V(Quadruple, 4)
+
 enum class OperandScale : uint8_t {
-  kSingle = 1,
-  kDouble = 2,
-  kQuadruple = 4,
-  kMaxValid = kQuadruple,
-  kInvalid = 8,
+#define DECLARE_OPERAND_SCALE(Name, Scale) k##Name = Scale,
+  OPERAND_SCALE_LIST(DECLARE_OPERAND_SCALE)
+#undef DECLARE_OPERAND_SCALE
+      kLast = kQuadruple
 };
 
 // Enumeration of the size classes of operand types used by
@@ -328,15 +342,13 @@
 
 // An interpreter Register which is located in the function's register file
 // in its stack frame. Registers hold parameters, this, and expression values.
-class Register {
+class Register final {
  public:
   explicit Register(int index = kInvalidIndex) : index_(index) {}
 
   int index() const { return index_; }
   bool is_parameter() const { return index() < 0; }
   bool is_valid() const { return index_ != kInvalidIndex; }
-  bool is_byte_operand() const;
-  bool is_short_operand() const;
 
   static Register FromParameterIndex(int index, int parameter_count);
   int ToParameterIndex(int parameter_count) const;
@@ -356,8 +368,20 @@
   static Register new_target();
   bool is_new_target() const;
 
-  int32_t ToOperand() const { return -index_; }
-  static Register FromOperand(int32_t operand) { return Register(-operand); }
+  // Returns the register for the bytecode array.
+  static Register bytecode_array();
+  bool is_bytecode_array() const;
+
+  // Returns the register for the saved bytecode offset.
+  static Register bytecode_offset();
+  bool is_bytecode_offset() const;
+
+  OperandSize SizeOfOperand() const;
+
+  int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
+  static Register FromOperand(int32_t operand) {
+    return Register(kRegisterFileStartOffset - operand);
+  }
 
   static bool AreContiguous(Register reg1, Register reg2,
                             Register reg3 = Register(),
@@ -387,6 +411,8 @@
 
  private:
   static const int kInvalidIndex = kMaxInt;
+  static const int kRegisterFileStartOffset =
+      InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
 
   void* operator new(size_t size);
   void operator delete(void* p);
@@ -447,9 +473,20 @@
   // Returns true if |bytecode| writes the accumulator.
   static bool WritesAccumulator(Bytecode bytecode);
 
+  // Return true if |bytecode| writes the accumulator with a boolean value.
+  static bool WritesBooleanToAccumulator(Bytecode bytecode);
+
+  // Return true if |bytecode| is an accumulator load bytecode,
+  // e.g. LdaConstant, LdaTrue, Ldar.
+  static bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode);
+
   // Returns the i-th operand of |bytecode|.
   static OperandType GetOperandType(Bytecode bytecode, int i);
 
+  // Returns a pointer to an array of operand types terminated in
+  // OperandType::kNone.
+  static const OperandType* GetOperandTypes(Bytecode bytecode);
+
   // Returns the size of the i-th operand of |bytecode|.
   static OperandSize GetOperandSize(Bytecode bytecode, int i,
                                     OperandScale operand_scale);
@@ -473,6 +510,9 @@
   // Returns the size of |operand|.
   static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
 
+  // Returns the number of values which |bytecode| returns.
+  static size_t ReturnCount(Bytecode bytecode);
+
   // Returns true if the bytecode is a conditional jump taking
   // an immediate byte operand (OperandType::kImm).
   static bool IsConditionalJumpImmediate(Bytecode bytecode);
@@ -497,6 +537,13 @@
   // any kind of operand.
   static bool IsJump(Bytecode bytecode);
 
+  // Returns true if the bytecode is a jump that internally coerces the
+  // accumulator to a boolean.
+  static bool IsJumpIfToBoolean(Bytecode bytecode);
+
+  // Returns the equivalent jump bytecode without the accumulator coercion.
+  static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
+
   // Returns true if the bytecode is a conditional jump, a jump, or a return.
   static bool IsJumpOrReturn(Bytecode bytecode);
 
@@ -509,6 +556,9 @@
   // Returns true if the bytecode is a debug break.
   static bool IsDebugBreak(Bytecode bytecode);
 
+  // Returns true if the bytecode is Ldar or Star.
+  static bool IsLdarOrStar(Bytecode bytecode);
+
   // Returns true if the bytecode has wider operand forms.
   static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
 
@@ -524,6 +574,10 @@
   // Returns true if |operand_type| represents a register used as an output.
   static bool IsRegisterOutputOperandType(OperandType operand_type);
 
+  // Returns the number of registers represented by a register operand. For
+  // instance, a RegPair represents two registers.
+  static int GetNumberOfRegistersRepresentedBy(OperandType operand_type);
+
   // Returns true if |operand_type| is a maybe register operand
   // (kMaybeReg).
   static bool IsMaybeRegisterOperandType(OperandType operand_type);
@@ -559,13 +613,34 @@
   // OperandScale values.
   static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
 
-  // Return the next larger operand scale.
-  static OperandScale NextOperandScale(OperandScale operand_scale);
+  // Return the operand size required to hold a signed operand.
+  static OperandSize SizeForSignedOperand(int value);
+
+  // Return the operand size required to hold an unsigned operand.
+  static OperandSize SizeForUnsignedOperand(int value);
+
+  // Return the operand size required to hold an unsigned operand.
+  static OperandSize SizeForUnsignedOperand(size_t value);
+
+  // Return the OperandScale required for bytecode emission of
+  // operand sizes.
+  static OperandScale OperandSizesToScale(
+      OperandSize size0, OperandSize size1 = OperandSize::kByte,
+      OperandSize size2 = OperandSize::kByte,
+      OperandSize size3 = OperandSize::kByte);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
 };
 
+class CreateObjectLiteralFlags {
+ public:
+  class FlagsBits : public BitField8<int, 0, 3> {};
+  class FastClonePropertiesCountBits
+      : public BitField8<int, FlagsBits::kNext, 3> {};
+  STATIC_ASSERT((FlagsBits::kMask & FastClonePropertiesCountBits::kMask) == 0);
+};
+
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
 std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
 std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
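
OperandSizesToScale, declared above and defined in bytecodes.cc, leans on the STATIC_ASSERT there: OperandSize and OperandScale share numeric values, so the required scale is just the widest operand size reinterpreted. A standalone sketch under that assumption:

#include <algorithm>
#include <cstdint>

enum class OperandSize : uint8_t { kByte = 1, kShort = 2, kQuad = 4 };
enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

// Since kByte==kSingle, kShort==kDouble, and kQuad==kQuadruple numerically,
// the scale required by a set of operands is the maximum operand size.
OperandScale SizesToScale(OperandSize s0, OperandSize s1 = OperandSize::kByte,
                          OperandSize s2 = OperandSize::kByte,
                          OperandSize s3 = OperandSize::kByte) {
  OperandSize widest = std::max(std::max(s0, s1), std::max(s2, s3));
  return static_cast<OperandScale>(widest);
}
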
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
index 6510aa4..66d650c 100644
--- a/src/interpreter/control-flow-builders.cc
+++ b/src/interpreter/control-flow-builders.cc
@@ -90,13 +90,16 @@
 LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
 
 
-void LoopBuilder::LoopHeader() {
+void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
   // Jumps from before the loop header into the loop violate ordering
   // requirements of bytecode basic blocks. The only entry into a loop
   // must be the loop header. Surely breaks are okay? Not if nested
   // and misplaced between the headers.
   DCHECK(break_sites_.empty() && continue_sites_.empty());
   builder()->Bind(&loop_header_);
+  for (auto& label : *additional_labels) {
+    builder()->Bind(loop_header_, &label);
+  }
 }
 
 
@@ -106,19 +109,11 @@
   DCHECK(loop_header_.is_bound());
   builder()->Bind(&loop_end_);
   SetBreakTarget(loop_end_);
-  if (next_.is_bound()) {
-    DCHECK(!condition_.is_bound() || next_.offset() >= condition_.offset());
-    SetContinueTarget(next_);
-  } else {
-    DCHECK(condition_.is_bound());
-    DCHECK_GE(condition_.offset(), loop_header_.offset());
-    DCHECK_LE(condition_.offset(), loop_end_.offset());
-    SetContinueTarget(condition_);
-  }
 }
 
-
-void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
+void LoopBuilder::SetContinueTarget() {
+  BytecodeLabel target;
+  builder()->Bind(&target);
   BindLabels(target, &continue_sites_);
 }
 
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index e4d376b..8778b26 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -87,27 +87,22 @@
         continue_sites_(builder->zone()) {}
   ~LoopBuilder();
 
-  void LoopHeader();
-  void Condition() { builder()->Bind(&condition_); }
-  void Next() { builder()->Bind(&next_); }
+  void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
   void JumpToHeader() { builder()->Jump(&loop_header_); }
   void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
+  void SetContinueTarget();
   void EndLoop();
 
   // This method is called when visiting continue statements in the AST.
-  // Inserts a jump to a unbound label that is patched when the corresponding
-  // SetContinueTarget is called.
+  // Inserts a jump to an unbound label that is patched when SetContinueTarget
+  // is called.
   void Continue() { EmitJump(&continue_sites_); }
   void ContinueIfTrue() { EmitJumpIfTrue(&continue_sites_); }
   void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_sites_); }
   void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
 
  private:
-  void SetContinueTarget(const BytecodeLabel& continue_target);
-
   BytecodeLabel loop_header_;
-  BytecodeLabel condition_;
-  BytecodeLabel next_;
   BytecodeLabel loop_end_;
 
   // Unbound labels that identify jumps for continue statements in the code.
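
The continue handling above is standard backpatching: Continue() emits a jump to a still-unbound label, and SetContinueTarget() now binds that label at the current emission point, just before the back edge, instead of at a previously recorded Condition()/Next() position. A minimal standalone model of the pattern (no V8 types; all names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy backpatching: forward jumps reserve an operand slot; binding the
// label patches every reserved slot with the final target offset.
class JumpPatcher {
 public:
  explicit JumpPatcher(std::vector<int32_t>* code) : code_(code) {}

  // Analogous to LoopBuilder::Continue(): the target is not yet known.
  void EmitForwardJump() {
    sites_.push_back(code_->size());
    code_->push_back(-1);  // placeholder operand
  }

  // Analogous to LoopBuilder::SetContinueTarget(): bind here, patch all.
  void BindHere() {
    int32_t target = static_cast<int32_t>(code_->size());
    for (std::size_t site : sites_) (*code_)[site] = target;
    sites_.clear();
  }

 private:
  std::vector<int32_t>* code_;
  std::vector<std::size_t> sites_;
};
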
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 2663e4a..4e911eb 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -4,6 +4,7 @@
 
 #include "src/interpreter/interpreter-assembler.h"
 
+#include <limits>
 #include <ostream>
 
 #include "src/code-factory.h"
@@ -24,23 +25,19 @@
 InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
                                            Bytecode bytecode,
                                            OperandScale operand_scale)
-    : compiler::CodeStubAssembler(isolate, zone,
-                                  InterpreterDispatchDescriptor(isolate),
-                                  Code::ComputeFlags(Code::BYTECODE_HANDLER),
-                                  Bytecodes::ToString(bytecode), 0),
+    : CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
+                        Code::ComputeFlags(Code::BYTECODE_HANDLER),
+                        Bytecodes::ToString(bytecode),
+                        Bytecodes::ReturnCount(bytecode)),
       bytecode_(bytecode),
       operand_scale_(operand_scale),
       accumulator_(this, MachineRepresentation::kTagged),
       accumulator_use_(AccumulatorUse::kNone),
-      context_(this, MachineRepresentation::kTagged),
-      bytecode_array_(this, MachineRepresentation::kTagged),
+      made_call_(false),
       disable_stack_check_across_call_(false),
       stack_pointer_before_call_(nullptr) {
   accumulator_.Bind(
       Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
-  context_.Bind(Parameter(InterpreterDispatchDescriptor::kContextParameter));
-  bytecode_array_.Bind(
-      Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter));
   if (FLAG_trace_ignition) {
     TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
   }
@@ -69,23 +66,26 @@
   accumulator_.Bind(value);
 }
 
-Node* InterpreterAssembler::GetContext() { return context_.value(); }
+Node* InterpreterAssembler::GetContext() {
+  return LoadRegister(Register::current_context());
+}
 
 void InterpreterAssembler::SetContext(Node* value) {
   StoreRegister(value, Register::current_context());
-  context_.Bind(value);
 }
 
 Node* InterpreterAssembler::BytecodeOffset() {
   return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
 }
 
-Node* InterpreterAssembler::RegisterFileRawPointer() {
-  return Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
-}
-
 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
-  return bytecode_array_.value();
+  if (made_call_) {
+    // If we have made a call, restore bytecode array from stack frame in case
+    // the debugger has swapped us to the patched debugger bytecode array.
+    return LoadRegister(Register::bytecode_array());
+  } else {
+    return Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter);
+  }
 }
 
 Node* InterpreterAssembler::DispatchTableRawPointer() {
@@ -93,40 +93,32 @@
 }
 
 Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
-  return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
-}
-
-Node* InterpreterAssembler::LoadRegister(int offset) {
-  return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
-              IntPtrConstant(offset));
-}
-
-Node* InterpreterAssembler::LoadRegister(Register reg) {
-  return LoadRegister(IntPtrConstant(-reg.index()));
+  return IntPtrAdd(LoadParentFramePointer(), RegisterFrameOffset(reg_index));
 }
 
 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
   return WordShl(index, kPointerSizeLog2);
 }
 
+Node* InterpreterAssembler::LoadRegister(Register reg) {
+  return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
+              IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
+}
+
 Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
-  return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+  return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
               RegisterFrameOffset(reg_index));
 }
 
-Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
-  return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             RegisterFileRawPointer(), IntPtrConstant(offset),
-                             value);
-}
-
 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
-  return StoreRegister(value, IntPtrConstant(-reg.index()));
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kTagged, LoadParentFramePointer(),
+      IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             RegisterFileRawPointer(),
+                             LoadParentFramePointer(),
                              RegisterFrameOffset(reg_index), value);
 }
 
@@ -380,11 +372,6 @@
   return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
 }
 
-Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
-  return Load(MachineType::AnyTagged(), object,
-              IntPtrConstant(offset - kHeapObjectTag));
-}
-
 Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
   return Load(MachineType::AnyTagged(), context,
               IntPtrConstant(Context::SlotOffset(slot_index)));
@@ -406,9 +393,7 @@
 }
 
 Node* InterpreterAssembler::LoadTypeFeedbackVector() {
-  Node* function = Load(
-      MachineType::AnyTagged(), RegisterFileRawPointer(),
-      IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
+  Node* function = LoadRegister(Register::function_closure());
   Node* shared_info =
       LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
   Node* vector =
@@ -417,13 +402,13 @@
 }
 
 void InterpreterAssembler::CallPrologue() {
-  StoreRegister(SmiTag(BytecodeOffset()),
-                InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+  StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());
 
   if (FLAG_debug_code && !disable_stack_check_across_call_) {
     DCHECK(stack_pointer_before_call_ == nullptr);
     stack_pointer_before_call_ = LoadStackPointer();
   }
+  made_call_ = true;
 }
 
 void InterpreterAssembler::CallEpilogue() {
@@ -434,11 +419,6 @@
     AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
                         kUnexpectedStackPointer);
   }
-
-  // Restore bytecode array from stack frame in case the debugger has swapped us
-  // to the patched debugger bytecode array.
-  bytecode_array_.Bind(LoadRegister(
-      InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 }
 
 Node* InterpreterAssembler::CallJS(Node* function, Node* context,
@@ -481,33 +461,32 @@
 }
 
 void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
-  CodeStubAssembler::Label ok(this);
-  CodeStubAssembler::Label interrupt_check(this);
-  CodeStubAssembler::Label end(this);
+  Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
   Node* budget_offset =
       IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
 
   // Update budget by |weight| and check if it reaches zero.
+  Variable new_budget(this, MachineRepresentation::kWord32);
   Node* old_budget =
       Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
-  Node* new_budget = Int32Add(old_budget, weight);
-  Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+  new_budget.Bind(Int32Add(old_budget, weight));
+  Node* condition =
+      Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
   Branch(condition, &ok, &interrupt_check);
 
   // Perform interrupt and reset budget.
   Bind(&interrupt_check);
-  CallRuntime(Runtime::kInterrupt, GetContext());
-  StoreNoWriteBarrier(MachineRepresentation::kWord32,
-                      BytecodeArrayTaggedPointer(), budget_offset,
-                      Int32Constant(Interpreter::InterruptBudget()));
-  Goto(&end);
+  {
+    CallRuntime(Runtime::kInterrupt, GetContext());
+    new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
+    Goto(&ok);
+  }
 
   // Update budget.
   Bind(&ok);
   StoreNoWriteBarrier(MachineRepresentation::kWord32,
-                      BytecodeArrayTaggedPointer(), budget_offset, new_budget);
-  Goto(&end);
-  Bind(&end);
+                      BytecodeArrayTaggedPointer(), budget_offset,
+                      new_budget.value());
 }
 
 Node* InterpreterAssembler::Advance(int delta) {
@@ -518,16 +497,15 @@
   return IntPtrAdd(BytecodeOffset(), delta);
 }
 
-void InterpreterAssembler::Jump(Node* delta) {
+Node* InterpreterAssembler::Jump(Node* delta) {
   UpdateInterruptBudget(delta);
-  DispatchTo(Advance(delta));
+  return DispatchTo(Advance(delta));
 }
 
 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
-  CodeStubAssembler::Label match(this);
-  CodeStubAssembler::Label no_match(this);
+  Label match(this), no_match(this);
 
-  Branch(condition, &match, &no_match);
+  BranchIf(condition, &match, &no_match);
   Bind(&match);
   Jump(delta);
   Bind(&no_match);
@@ -543,37 +521,45 @@
   JumpConditional(WordNotEqual(lhs, rhs), delta);
 }
 
-void InterpreterAssembler::Dispatch() {
-  DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
+Node* InterpreterAssembler::Dispatch() {
+  return DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
 }
 
-void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
+Node* InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
   Node* target_bytecode = Load(
       MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
   if (kPointerSize == 8) {
     target_bytecode = ChangeUint32ToUint64(target_bytecode);
   }
 
-  // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
-  // from code object on every dispatch.
-  Node* target_code_object =
+  if (FLAG_trace_ignition_dispatches) {
+    TraceBytecodeDispatch(target_bytecode);
+  }
+
+  Node* target_code_entry =
       Load(MachineType::Pointer(), DispatchTableRawPointer(),
            WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
 
-  DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
+  return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
 }
 
-void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
-                                                     Node* bytecode_offset) {
+Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
+                                                      Node* bytecode_offset) {
+  Node* handler_entry =
+      IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+  return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
+}
+
+Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
+    Node* handler_entry, Node* bytecode_offset) {
   if (FLAG_trace_ignition) {
     TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
   }
 
   InterpreterDispatchDescriptor descriptor(isolate());
-  Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
-                  bytecode_offset,           BytecodeArrayTaggedPointer(),
-                  DispatchTableRawPointer(), GetContext()};
-  TailCall(descriptor, handler, args, 0);
+  Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
+                  BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
+  return TailCallBytecodeDispatch(descriptor, handler_entry, args);
 }
 
 void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -590,6 +576,11 @@
   if (kPointerSize == 8) {
     next_bytecode = ChangeUint32ToUint64(next_bytecode);
   }
+
+  if (FLAG_trace_ignition_dispatches) {
+    TraceBytecodeDispatch(next_bytecode);
+  }
+
   Node* base_index;
   switch (operand_scale) {
     case OperandScale::kDouble:
@@ -603,14 +594,14 @@
       base_index = nullptr;
   }
   Node* target_index = IntPtrAdd(base_index, next_bytecode);
-  Node* target_code_object =
+  Node* target_code_entry =
       Load(MachineType::Pointer(), DispatchTableRawPointer(),
            WordShl(target_index, kPointerSizeLog2));
 
-  DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
+  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
 }
 
-void InterpreterAssembler::InterpreterReturn() {
+void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
   // TODO(rmcilroy): Investigate whether it is worth supporting self
   // optimization of primitive functions like FullCodegen.
 
@@ -620,29 +611,14 @@
       Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
                BytecodeOffset());
   UpdateInterruptBudget(profiling_weight);
-
-  Node* exit_trampoline_code_object =
-      HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
-  DispatchToBytecodeHandler(exit_trampoline_code_object);
 }
 
-void InterpreterAssembler::StackCheck() {
-  CodeStubAssembler::Label end(this);
-  CodeStubAssembler::Label ok(this);
-  CodeStubAssembler::Label stack_guard(this);
-
+Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
   Node* sp = LoadStackPointer();
   Node* stack_limit = Load(
       MachineType::Pointer(),
       ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
-  Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
-  Branch(condition, &ok, &stack_guard);
-  Bind(&stack_guard);
-  CallRuntime(Runtime::kStackGuard, GetContext());
-  Goto(&end);
-  Bind(&ok);
-  Goto(&end);
-  Bind(&end);
+  return UintPtrLessThan(sp, stack_limit);
 }
 
 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -654,18 +630,14 @@
 
 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                                BailoutReason bailout_reason) {
-  CodeStubAssembler::Label match(this);
-  CodeStubAssembler::Label no_match(this);
-  CodeStubAssembler::Label end(this);
+  Label ok(this), abort(this, Label::kDeferred);
+  BranchIfWordEqual(lhs, rhs, &ok, &abort);
 
-  Node* condition = WordEqual(lhs, rhs);
-  Branch(condition, &match, &no_match);
-  Bind(&no_match);
+  Bind(&abort);
   Abort(bailout_reason);
-  Goto(&end);
-  Bind(&match);
-  Goto(&end);
-  Bind(&end);
+  Goto(&ok);
+
+  Bind(&ok);
 }
 
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
@@ -673,6 +645,35 @@
               SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
 }
 
+void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
+  Node* counters_table = ExternalConstant(
+      ExternalReference::interpreter_dispatch_counters(isolate()));
+  Node* source_bytecode_table_index = IntPtrConstant(
+      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
+
+  Node* counter_offset =
+      WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
+              IntPtrConstant(kPointerSizeLog2));
+  Node* old_counter =
+      Load(MachineType::IntPtr(), counters_table, counter_offset);
+
+  Label counter_ok(this), counter_saturated(this, Label::kDeferred);
+
+  Node* counter_reached_max = WordEqual(
+      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
+  BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+
+  Bind(&counter_ok);
+  {
+    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
+    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
+                        counter_offset, new_counter);
+    Goto(&counter_saturated);
+  }
+
+  Bind(&counter_saturated);
+}
+
 // static
 bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
@@ -687,6 +688,84 @@
 #endif
 }
 
+Node* InterpreterAssembler::RegisterCount() {
+  Node* bytecode_array = LoadRegister(Register::bytecode_array());
+  Node* frame_size = LoadObjectField(
+      bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32());
+  return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2));
+}
+
+Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
+  if (FLAG_debug_code) {
+    Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
+    AbortIfWordNotEqual(
+        array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+  }
+
+  Variable var_index(this, MachineRepresentation::kWord32);
+  var_index.Bind(Int32Constant(0));
+
+  // Iterate over the register file and write values into the array.
+  // The mapping of register to array index must match that used in
+  // BytecodeGraphBuilder::VisitResumeGenerator.
+  Label loop(this, &var_index), done_loop(this);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* index = var_index.value();
+    Node* condition = Int32LessThan(index, RegisterCount());
+    GotoUnless(condition, &done_loop);
+
+    Node* reg_index =
+        Int32Sub(Int32Constant(Register(0).ToOperand()), index);
+    Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
+
+    StoreFixedArrayElement(array, index, value);
+
+    var_index.Bind(Int32Add(index, Int32Constant(1)));
+    Goto(&loop);
+  }
+  Bind(&done_loop);
+
+  return array;
+}
+
+Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
+  if (FLAG_debug_code) {
+    Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
+    AbortIfWordNotEqual(
+        array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+  }
+
+  Variable var_index(this, MachineRepresentation::kWord32);
+  var_index.Bind(Int32Constant(0));
+
+  // Iterate over array and write values into register file.  Also erase the
+  // array contents to not keep them alive artificially.
+  Label loop(this, &var_index), done_loop(this);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    Node* index = var_index.value();
+    Node* condition = Int32LessThan(index, RegisterCount());
+    GotoUnless(condition, &done_loop);
+
+    Node* value = LoadFixedArrayElement(array, index);
+
+    Node* reg_index =
+        Int32Sub(Int32Constant(Register(0).ToOperand()), index);
+    StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
+
+    StoreFixedArrayElement(array, index, StaleRegisterConstant());
+
+    var_index.Bind(Int32Add(index, Int32Constant(1)));
+    Goto(&loop);
+  }
+  Bind(&done_loop);
+
+  return array;
+}
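
For orientation: ExportRegisterFile and ImportRegisterFile advance the array
index upward while the register operand index counts down from
Register(0).ToOperand(), because interpreter registers are laid out at
decreasing operand values. A minimal plain-C++ sketch of that index mapping
(names and addressing hypothetical):

    #include <cstdint>

    // registers[] stands in for the interpreter register file, addressed so
    // that register i lives at index register0_operand - i (the same mapping
    // as reg_index in ExportRegisterFile above).
    void ExportRegisters(const std::intptr_t* registers,
                         std::intptr_t register0_operand,
                         std::intptr_t* array, int count) {
      for (int i = 0; i < count; ++i) {
        array[i] = registers[register0_operand - i];
      }
    }
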
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index 86ecea5..f8d4b7c 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -8,7 +8,7 @@
 #include "src/allocation.h"
 #include "src/base/smart-pointers.h"
 #include "src/builtins.h"
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
 #include "src/frames.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/runtime/runtime.h"
@@ -17,7 +17,7 @@
 namespace internal {
 namespace interpreter {
 
-class InterpreterAssembler : public compiler::CodeStubAssembler {
+class InterpreterAssembler : public CodeStubAssembler {
  public:
   InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
                        OperandScale operand_scale);
@@ -50,11 +50,16 @@
   compiler::Node* GetContext();
   void SetContext(compiler::Node* value);
 
+  // Number of registers.
+  compiler::Node* RegisterCount();
+
+  // Back up/restore the register file to/from a fixed array of the correct length.
+  compiler::Node* ExportRegisterFile(compiler::Node* array);
+  compiler::Node* ImportRegisterFile(compiler::Node* array);
+
   // Loads from and stores to the interpreter register file.
-  compiler::Node* LoadRegister(int offset);
   compiler::Node* LoadRegister(Register reg);
   compiler::Node* LoadRegister(compiler::Node* reg_index);
-  compiler::Node* StoreRegister(compiler::Node* value, int offset);
   compiler::Node* StoreRegister(compiler::Node* value, Register reg);
   compiler::Node* StoreRegister(compiler::Node* value,
                                 compiler::Node* reg_index);
@@ -69,9 +74,6 @@
   // Load constant at |index| in the constant pool.
   compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
 
-  // Load a field from an object on the heap.
-  compiler::Node* LoadObjectField(compiler::Node* object, int offset);
-
   // Load |slot_index| from |context|.
   compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
   compiler::Node* LoadContextSlot(compiler::Node* context,
@@ -110,12 +112,7 @@
                                compiler::Node* arg_count, int return_size = 1);
 
   // Jump relative to the current bytecode by |jump_offset|.
-  void Jump(compiler::Node* jump_offset);
-
-  // Jump relative to the current bytecode by |jump_offset| if the
-  // |condition| is true. Helper function for JumpIfWordEqual and
-  // JumpIfWordNotEqual.
-  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+  compiler::Node* Jump(compiler::Node* jump_offset);
 
   // Jump relative to the current bytecode by |jump_offset| if the
   // word values |lhs| and |rhs| are equal.
@@ -127,20 +124,18 @@
   void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           compiler::Node* jump_offset);
 
-  // Perform a stack guard check.
-  void StackCheck();
+  // Returns true if the stack guard check triggers an interrupt.
+  compiler::Node* StackCheckTriggeredInterrupt();
 
-  // Returns from the function.
-  void InterpreterReturn();
+  // Updates the profiler interrupt budget for a return.
+  void UpdateInterruptBudgetOnReturn();
 
   // Dispatch to the bytecode.
-  void Dispatch();
+  compiler::Node* Dispatch();
 
   // Dispatch to bytecode handler.
-  void DispatchToBytecodeHandler(compiler::Node* handler,
-                                 compiler::Node* bytecode_offset);
-  void DispatchToBytecodeHandler(compiler::Node* handler) {
-    DispatchToBytecodeHandler(handler, BytecodeOffset());
+  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
+    return DispatchToBytecodeHandler(handler, BytecodeOffset());
   }
 
   // Dispatch bytecode as wide operand variant.
@@ -148,14 +143,14 @@
 
   // Abort with the given bailout reason.
   void Abort(BailoutReason bailout_reason);
+  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+                           BailoutReason bailout_reason);
 
  protected:
   Bytecode bytecode() const { return bytecode_; }
   static bool TargetSupportsUnalignedAccess();
 
  private:
-  // Returns a raw pointer to start of the register file on the stack.
-  compiler::Node* RegisterFileRawPointer();
   // Returns a tagged pointer to the current function's BytecodeArray object.
   compiler::Node* BytecodeArrayTaggedPointer();
   // Returns the offset from the BytecodeArrayPointer of the current bytecode.
@@ -173,6 +168,9 @@
   void CallPrologue() override;
   void CallEpilogue() override;
 
+  // Increment the dispatch counter for the (current, next) bytecode pair.
+  void TraceBytecodeDispatch(compiler::Node* target_index);
+
   // Traces the current bytecode by calling |function_id|.
   void TraceBytecode(Runtime::FunctionId function_id);
 
@@ -206,17 +204,26 @@
   compiler::Node* BytecodeUnsignedOperand(int operand_index,
                                           OperandSize operand_size);
 
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // |condition| is true. Helper function for JumpIfWordEqual and
+  // JumpIfWordNotEqual.
+  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
   // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
   // update BytecodeOffset() itself.
   compiler::Node* Advance(int delta);
   compiler::Node* Advance(compiler::Node* delta);
 
   // Starts next instruction dispatch at |new_bytecode_offset|.
-  void DispatchTo(compiler::Node* new_bytecode_offset);
+  compiler::Node* DispatchTo(compiler::Node* new_bytecode_offset);
 
-  // Abort operations for debug code.
-  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
-                           BailoutReason bailout_reason);
+  // Dispatch to the bytecode handler with code offset |handler|.
+  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
+                                            compiler::Node* bytecode_offset);
+
+  // Dispatch to the bytecode handler with code entry point |handler_entry|.
+  compiler::Node* DispatchToBytecodeHandlerEntry(
+      compiler::Node* handler_entry, compiler::Node* bytecode_offset);
 
   OperandScale operand_scale() const { return operand_scale_; }
 
@@ -224,8 +231,7 @@
   OperandScale operand_scale_;
   CodeStubAssembler::Variable accumulator_;
   AccumulatorUse accumulator_use_;
-  CodeStubAssembler::Variable context_;
-  CodeStubAssembler::Variable bytecode_array_;
+  bool made_call_;
 
   bool disable_stack_check_across_call_;
   compiler::Node* stack_pointer_before_call_;
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 5084300..a42da50 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -4,6 +4,8 @@
 
 #include "src/interpreter/interpreter.h"
 
+#include <fstream>
+
 #include "src/ast/prettyprinter.h"
 #include "src/code-factory.h"
 #include "src/compiler.h"
@@ -20,6 +22,8 @@
 namespace interpreter {
 
 using compiler::Node;
+typedef CodeStubAssembler::Label Label;
+typedef CodeStubAssembler::Variable Variable;
 
 #define __ assembler->
 
@@ -28,15 +32,26 @@
 }
 
 void Interpreter::Initialize() {
-  DCHECK(FLAG_ignition);
   if (IsDispatchTableInitialized()) return;
   Zone zone(isolate_->allocator());
   HandleScope scope(isolate_);
 
+  if (FLAG_trace_ignition_dispatches) {
+    static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
+    bytecode_dispatch_counters_table_.Reset(
+        new uintptr_t[kBytecodeCount * kBytecodeCount]);
+    memset(bytecode_dispatch_counters_table_.get(), 0,
+           sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
+  }
+
   // Generate bytecode handlers for all bytecodes and scales.
-  for (OperandScale operand_scale = OperandScale::kSingle;
-       operand_scale <= OperandScale::kMaxValid;
-       operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+  const OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+      OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+  };
+
+  for (OperandScale operand_scale : kOperandScales) {
 #define GENERATE_CODE(Name, ...)                                               \
   {                                                                            \
     if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) {     \
@@ -45,7 +60,7 @@
       Do##Name(&assembler);                                                    \
       Handle<Code> code = assembler.GenerateCode();                            \
       size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
-      dispatch_table_[index] = *code;                                          \
+      dispatch_table_[index] = code->entry();                                  \
       TraceCodegen(code);                                                      \
       LOG_CODE_EVENT(                                                          \
           isolate_,                                                            \
@@ -73,7 +88,8 @@
   DCHECK(IsDispatchTableInitialized());
   DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
   size_t index = GetDispatchTableIndex(bytecode, operand_scale);
-  return dispatch_table_[index];
+  Address code_entry = dispatch_table_[index];
+  return Code::GetCodeFromTargetAddress(code_entry);
 }
 
 // static
@@ -81,18 +97,30 @@
                                           OperandScale operand_scale) {
   static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
   size_t index = static_cast<size_t>(bytecode);
-  OperandScale current_scale = OperandScale::kSingle;
-  while (current_scale != operand_scale) {
-    index += kEntriesPerOperandScale;
-    current_scale = Bytecodes::NextOperandScale(current_scale);
+  switch (operand_scale) {
+    case OperandScale::kSingle:
+      return index;
+    case OperandScale::kDouble:
+      return index + kEntriesPerOperandScale;
+    case OperandScale::kQuadruple:
+      return index + 2 * kEntriesPerOperandScale;
   }
-  return index;
+  UNREACHABLE();
+  return 0;
 }
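
In other words, the dispatch table is laid out as three consecutive 256-entry
blocks, one per operand scale, so an index is just the bytecode value plus a
per-scale block offset. A small sketch of the arithmetic, assuming the
constants above (function name hypothetical):

    #include <cstddef>
    #include <cstdint>

    // block is 0, 1, or 2 for single, double, and quadruple operand scales.
    std::size_t DispatchTableIndex(std::uint8_t bytecode, int block) {
      const std::size_t kEntriesPerOperandScale = 256;  // 1u << kBitsPerByte
      return static_cast<std::size_t>(bytecode) +
             block * kEntriesPerOperandScale;
    }
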
 
 void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
-  v->VisitPointers(
-      reinterpret_cast<Object**>(&dispatch_table_[0]),
-      reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
+  for (int i = 0; i < kDispatchTableSize; i++) {
+    Address code_entry = dispatch_table_[i];
+    Object* code = code_entry == nullptr
+                       ? nullptr
+                       : Code::GetCodeFromTargetAddress(code_entry);
+    Object* old_code = code;
+    v->VisitPointer(&code);
+    if (code != old_code) {
+      dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
+    }
+  }
 }
 
 // static
@@ -103,6 +131,8 @@
 }
 
 bool Interpreter::MakeBytecode(CompilationInfo* info) {
+  RuntimeCallTimerScope runtimeTimer(info->isolate(),
+                                     &RuntimeCallStats::CompileIgnition);
   TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
   TRACE_EVENT0("v8", "V8.CompileIgnition");
 
@@ -131,8 +161,8 @@
   }
 #endif  // DEBUG
 
-  BytecodeGenerator generator(info->isolate(), info->zone());
-  Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
+  BytecodeGenerator generator(info);
+  Handle<BytecodeArray> bytecodes = generator.MakeBytecode();
 
   if (generator.HasStackOverflow()) return false;
 
@@ -148,9 +178,11 @@
 }
 
 bool Interpreter::IsDispatchTableInitialized() {
-  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen) {
-    // Regenerate table to add bytecode tracing operations
-    // or to print the assembly code generated by TurboFan.
+  if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
+      FLAG_trace_ignition_dispatches) {
+    // Regenerate table to add bytecode tracing operations,
+    // print the assembly code generated by TurboFan,
+    // or instrument handlers with dispatch counters.
     return false;
   }
   return dispatch_table_[0] != nullptr;
@@ -168,9 +200,10 @@
 
 const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
 #ifdef ENABLE_DISASSEMBLER
-#define RETURN_NAME(Name, ...)                                         \
-  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code) { \
-    return #Name;                                                      \
+#define RETURN_NAME(Name, ...)                                 \
+  if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
+      code->entry()) {                                         \
+    return #Name;                                              \
   }
   BYTECODE_LIST(RETURN_NAME)
 #undef RETURN_NAME
@@ -178,6 +211,62 @@
   return nullptr;
 }
 
+uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
+  int from_index = Bytecodes::ToByte(from);
+  int to_index = Bytecodes::ToByte(to);
+  return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
+                                           to_index];
+}
+
+Local<v8::Object> Interpreter::GetDispatchCountersObject() {
+  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+  Local<v8::Context> context = isolate->GetCurrentContext();
+
+  Local<v8::Object> counters_map = v8::Object::New(isolate);
+
+  // Output is a JSON-encoded object of objects.
+  //
+  // The keys of the top-level object are source bytecodes, and the
+  // corresponding values are objects. The keys of these inner objects are
+  // the dispatch destinations, and each associated value is a counter for
+  // the corresponding source-destination dispatch pair.
+  //
+  // Only non-zero counters are emitted, but an entry in the top-level
+  // object is always present, even if its value is empty because all
+  // counters for that source are zero.
+
+  for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
+    Bytecode from_bytecode = Bytecodes::FromByte(from_index);
+    Local<v8::Object> counters_row = v8::Object::New(isolate);
+
+    for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
+      Bytecode to_bytecode = Bytecodes::FromByte(to_index);
+      uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);
+
+      if (counter > 0) {
+        std::string to_name = Bytecodes::ToString(to_bytecode);
+        Local<v8::String> to_name_object =
+            v8::String::NewFromUtf8(isolate, to_name.c_str(),
+                                    NewStringType::kNormal)
+                .ToLocalChecked();
+        Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
+        CHECK(counters_row->Set(context, to_name_object, counter_object)
+                  .IsJust());
+      }
+    }
+
+    std::string from_name = Bytecodes::ToString(from_bytecode);
+    Local<v8::String> from_name_object =
+        v8::String::NewFromUtf8(isolate, from_name.c_str(),
+                                NewStringType::kNormal)
+            .ToLocalChecked();
+
+    CHECK(counters_map->Set(context, from_name_object, counters_row).IsJust());
+  }
+
+  return counters_map;
+}
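
To make the shape concrete, the resulting counters object serializes to
something like this (all bytecode names and counts below are hypothetical):

    {"Ldar": {"Star": 421, "Return": 3}, "Star": {"Ldar": 417}, ...}

Every source bytecode contributes a key, even when its row is empty.
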
+
 // LdaZero
 //
 // Load literal '0' into the accumulator.
@@ -640,12 +729,22 @@
   __ Dispatch();
 }
 
+template <class Generator>
+void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* lhs = __ LoadRegister(reg_index);
+  Node* rhs = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = Generator::Generate(assembler, lhs, rhs, context);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
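
The Generator template parameter is any stub type that exposes a static
Generate hook; DoBinaryOp<Generator> relies only on that shape, as the call
site above shows. A sketch of the minimal interface, with a hypothetical
stub name:

    // Minimal shape a Generator must provide for DoBinaryOp<Generator>;
    // the class name here is hypothetical.
    class HypotheticalBinaryOpStub {
     public:
      static compiler::Node* Generate(CodeStubAssembler* assembler,
                                      compiler::Node* lhs,
                                      compiler::Node* rhs,
                                      compiler::Node* context);
    };
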
 
 // Add <src>
 //
 // Add register <src> to accumulator.
 void Interpreter::DoAdd(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::Add(isolate_), assembler);
+  DoBinaryOp<AddStub>(assembler);
 }
 
 
@@ -653,7 +752,7 @@
 //
 // Subtract register <src> from accumulator.
 void Interpreter::DoSub(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::Subtract(isolate_), assembler);
+  DoBinaryOp<SubtractStub>(assembler);
 }
 
 
@@ -661,7 +760,7 @@
 //
 // Multiply accumulator by register <src>.
 void Interpreter::DoMul(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kMultiply, assembler);
+  DoBinaryOp<MultiplyStub>(assembler);
 }
 
 
@@ -669,7 +768,7 @@
 //
 // Divide register <src> by accumulator.
 void Interpreter::DoDiv(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kDivide, assembler);
+  DoBinaryOp<DivideStub>(assembler);
 }
 
 
@@ -677,7 +776,7 @@
 //
 // Modulo register <src> by accumulator.
 void Interpreter::DoMod(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kModulus, assembler);
+  DoBinaryOp<ModulusStub>(assembler);
 }
 
 
@@ -685,7 +784,7 @@
 //
 // BitwiseOr register <src> to accumulator.
 void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::BitwiseOr(isolate_), assembler);
+  DoBinaryOp<BitwiseOrStub>(assembler);
 }
 
 
@@ -693,7 +792,7 @@
 //
 // BitwiseXor register <src> to accumulator.
 void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::BitwiseXor(isolate_), assembler);
+  DoBinaryOp<BitwiseXorStub>(assembler);
 }
 
 
@@ -701,7 +800,7 @@
 //
 // BitwiseAnd register <src> to accumulator.
 void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::BitwiseAnd(isolate_), assembler);
+  DoBinaryOp<BitwiseAndStub>(assembler);
 }
 
 
@@ -712,7 +811,7 @@
 // before the operation. 5 lsb bits from the accumulator are used as count
 // i.e. <src> << (accumulator & 0x1F).
 void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kShiftLeft, assembler);
+  DoBinaryOp<ShiftLeftStub>(assembler);
 }
 
 
@@ -723,7 +822,7 @@
 // accumulator to uint32 before the operation. 5 lsb bits from the accumulator
 // are used as count i.e. <src> >> (accumulator & 0x1F).
 void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kShiftRight, assembler);
+  DoBinaryOp<ShiftRightStub>(assembler);
 }
 
 
@@ -734,62 +833,77 @@
 // uint32 before the operation. 5 lsb bits from the accumulator are used as
 // count, i.e. <src> >>> (accumulator & 0x1F).
 void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kShiftRightLogical, assembler);
+  DoBinaryOp<ShiftRightLogicalStub>(assembler);
 }
 
-void Interpreter::DoCountOp(Runtime::FunctionId function_id,
-                            InterpreterAssembler* assembler) {
+template <class Generator>
+void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  Node* one = __ NumberConstant(1);
   Node* context = __ GetContext();
-  Node* result = __ CallRuntime(function_id, context, value, one);
+  Node* result = Generator::Generate(assembler, value, context);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
 // Inc
 //
 // Increments value in the accumulator by one.
 void Interpreter::DoInc(InterpreterAssembler* assembler) {
-  DoCountOp(Runtime::kAdd, assembler);
+  DoUnaryOp<IncStub>(assembler);
 }
 
-
 // Dec
 //
 // Decrements value in the accumulator by one.
 void Interpreter::DoDec(InterpreterAssembler* assembler) {
-  DoCountOp(Runtime::kSubtract, assembler);
+  DoUnaryOp<DecStub>(assembler);
 }
 
+void Interpreter::DoLogicalNotOp(Node* value, InterpreterAssembler* assembler) {
+  Label if_true(assembler), if_false(assembler), end(assembler);
+  Node* true_value = __ BooleanConstant(true);
+  Node* false_value = __ BooleanConstant(false);
+  __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
+  __ Bind(&if_true);
+  {
+    __ SetAccumulator(false_value);
+    __ Goto(&end);
+  }
+  __ Bind(&if_false);
+  {
+    if (FLAG_debug_code) {
+      __ AbortIfWordNotEqual(value, false_value,
+                             BailoutReason::kExpectedBooleanValue);
+    }
+    __ SetAccumulator(true_value);
+    __ Goto(&end);
+  }
+  __ Bind(&end);
+}
 
-// LogicalNot
+// ToBooleanLogicalNot
 //
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
-void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
   Callable callable = CodeFactory::ToBoolean(isolate_);
   Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
       __ CallStub(callable.descriptor(), target, context, accumulator);
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
-  Node* true_value = __ BooleanConstant(true);
-  Node* false_value = __ BooleanConstant(false);
-  Node* condition = __ WordEqual(to_boolean_value, true_value);
-  __ Branch(condition, &if_true, &if_false);
-  __ Bind(&if_true);
-  {
-    __ SetAccumulator(false_value);
-    __ Dispatch();
-  }
-  __ Bind(&if_false);
-  {
-    __ SetAccumulator(true_value);
-    __ Dispatch();
-  }
+  DoLogicalNotOp(to_boolean_value, assembler);
+  __ Dispatch();
+}
+
+// LogicalNot
+//
+// Perform logical-not on the accumulator, which must already be a boolean
+// value.
+void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+  Node* value = __ GetAccumulator();
+  DoLogicalNotOp(value, assembler);
+  __ Dispatch();
 }
 
 // TypeOf
@@ -1058,7 +1172,7 @@
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
 void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kHasProperty, assembler);
+  DoBinaryOp(CodeFactory::HasProperty(isolate_), assembler);
 }
 
 
@@ -1067,7 +1181,7 @@
 // Test if the object referenced by the <src> register is an instance of the type
 // referenced by the accumulator.
 void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInstanceOf, assembler);
+  DoBinaryOp(CodeFactory::InstanceOf(isolate_), assembler);
 }
 
 void Interpreter::DoTypeConversionOp(Callable callable,
@@ -1316,23 +1430,6 @@
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }
 
-void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
-                                  InterpreterAssembler* assembler) {
-  Node* index = __ BytecodeOperandIdx(0);
-  Node* constant_elements = __ LoadConstantPoolEntry(index);
-  Node* literal_index_raw = __ BytecodeOperandIdx(1);
-  Node* literal_index = __ SmiTag(literal_index_raw);
-  Node* flags_raw = __ BytecodeOperandFlag(2);
-  Node* flags = __ SmiTag(flags_raw);
-  Node* closure = __ LoadRegister(Register::function_closure());
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(function_id, context, closure, literal_index,
-                                constant_elements, flags);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-
 // CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
 //
 // Creates a regular expression literal for literal index <literal_idx> with
@@ -1359,15 +1456,67 @@
 // Creates an array literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
 void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant_elements = __ LoadConstantPoolEntry(index);
+  Node* literal_index_raw = __ BytecodeOperandIdx(1);
+  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* flags_raw = __ BytecodeOperandFlag(2);
+  Node* flags = __ SmiTag(flags_raw);
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+                                literal_index, constant_elements, flags);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
 //
-// Creates an object literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
+// Creates an object literal for literal index <literal_idx> with
+// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
-  DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+  Node* literal_index_raw = __ BytecodeOperandIdx(1);
+  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* bytecode_flags = __ BytecodeOperandFlag(2);
+  Node* closure = __ LoadRegister(Register::function_closure());
+
+  // Check if we can do a fast clone or have to call the runtime.
+  Label if_fast_clone(assembler),
+      if_not_fast_clone(assembler, Label::kDeferred);
+  Node* fast_clone_properties_count =
+      __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
+          bytecode_flags);
+  __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+
+  __ Bind(&if_fast_clone);
+  {
+    // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
+    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
+        assembler, &if_not_fast_clone, closure, literal_index,
+        fast_clone_properties_count);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+
+  __ Bind(&if_not_fast_clone);
+  {
+    // If we can't do a fast clone, call into the runtime.
+    Node* index = __ BytecodeOperandIdx(0);
+    Node* constant_elements = __ LoadConstantPoolEntry(index);
+    Node* context = __ GetContext();
+
+    STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
+    Node* flags_raw = __ Word32And(
+        bytecode_flags,
+        __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
+    Node* flags = __ SmiTag(flags_raw);
+
+    Node* result =
+        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                       literal_index, constant_elements, flags);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
 }
 
 // CreateClosure <index> <tenured>
@@ -1394,10 +1543,40 @@
 void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
-  Node* result =
-      __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
-  __ SetAccumulator(result);
-  __ Dispatch();
+
+  Label if_duplicate_parameters(assembler, Label::kDeferred);
+  Label if_not_duplicate_parameters(assembler);
+
+  // Check if function has duplicate parameters.
+  // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
+  // duplicate parameters.
+  Node* shared_info =
+      __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+  Node* compiler_hints = __ LoadObjectField(
+      shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
+      MachineType::Uint8());
+  Node* duplicate_parameters_bit = __ Int32Constant(
+      1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
+  Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
+  __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+
+  __ Bind(&if_not_duplicate_parameters);
+  {
+    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
+    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
+    Node* target = __ HeapConstant(callable.code());
+    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+
+  __ Bind(&if_duplicate_parameters);
+  {
+    Node* result =
+        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
 }
 
 
@@ -1405,7 +1584,8 @@
 //
 // Creates a new unmapped arguments object.
 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::FastNewStrictArguments(isolate_);
+  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
+  Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
   Node* target = __ HeapConstant(callable.code());
   Node* context = __ GetContext();
   Node* closure = __ LoadRegister(Register::function_closure());
@@ -1418,7 +1598,8 @@
 //
 // Creates a new rest parameter array.
 void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::FastNewRestParameter(isolate_);
+  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
+  Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
   Node* target = __ HeapConstant(callable.code());
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
@@ -1431,8 +1612,20 @@
 //
 // Performs a stack guard check.
 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
-  __ StackCheck();
+  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
+
+  Node* interrupt = __ StackCheckTriggeredInterrupt();
+  __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+
+  __ Bind(&ok);
   __ Dispatch();
+
+  __ Bind(&stack_check_interrupt);
+  {
+    Node* context = __ GetContext();
+    __ CallRuntime(Runtime::kStackGuard, context);
+    __ Dispatch();
+  }
 }
 
 // Throw
@@ -1463,7 +1656,9 @@
 //
 // Return the value in the accumulator.
 void Interpreter::DoReturn(InterpreterAssembler* assembler) {
-  __ InterpreterReturn();
+  __ UpdateInterruptBudgetOnReturn();
+  Node* accumulator = __ GetAccumulator();
+  __ Return(accumulator);
 }
 
 // Debugger
@@ -1525,13 +1720,14 @@
   Node* cache_array = __ LoadRegister(cache_array_reg);
 
   // Load the next key from the enumeration array.
-  Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
+  Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
+                                       CodeStubAssembler::SMI_PARAMETERS);
 
   // Check if we can use the for-in fast path potentially using the enum cache.
-  InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
   Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
   Node* condition = __ WordEqual(receiver_map, cache_type);
-  __ Branch(condition, &if_fast, &if_slow);
+  __ BranchIf(condition, &if_fast, &if_slow);
   __ Bind(&if_fast);
   {
     // Enum cache in use for {receiver}; the {key} is definitely valid.
@@ -1545,8 +1741,8 @@
     Node* type_feedback_vector = __ LoadTypeFeedbackVector();
     Node* megamorphic_sentinel =
         __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
-    __ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index,
-                                            megamorphic_sentinel);
+    __ StoreFixedArrayElement(type_feedback_vector, vector_index,
+                              megamorphic_sentinel, SKIP_WRITE_BARRIER);
 
     // Need to filter the {key} for the {receiver}.
     Node* context = __ GetContext();
@@ -1567,21 +1763,20 @@
   Node* cache_length = __ LoadRegister(cache_length_reg);
 
   // Check if {index} is at {cache_length} already.
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
-  Node* condition = __ WordEqual(index, cache_length);
-  __ Branch(condition, &if_true, &if_false);
+  Label if_true(assembler), if_false(assembler), end(assembler);
+  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
   __ Bind(&if_true);
   {
-    Node* result = __ BooleanConstant(true);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(true));
+    __ Goto(&end);
   }
   __ Bind(&if_false);
   {
-    Node* result = __ BooleanConstant(false);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(false));
+    __ Goto(&end);
   }
+  __ Bind(&end);
+  __ Dispatch();
 }
 
 // ForInStep <index>
@@ -1618,6 +1813,53 @@
   __ Abort(kInvalidBytecode);
 }
 
+// Nop
+//
+// No operation.
+void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }
+
+// SuspendGenerator <generator>
+//
+// Exports the register file and stores it into the generator.  Also stores the
+// current context and the state given in the accumulator into the generator.
+void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
+  Node* generator_reg = __ BytecodeOperandReg(0);
+  Node* generator = __ LoadRegister(generator_reg);
+
+  Node* array =
+      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
+  Node* context = __ GetContext();
+  Node* state = __ GetAccumulator();
+
+  __ ExportRegisterFile(array);
+  __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
+  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);
+
+  __ Dispatch();
+}
+
+// ResumeGenerator <generator>
+//
+// Imports the register file stored in the generator. Also loads the
+// generator's state and stores it in the accumulator, before overwriting the
+// generator's stored state with kGeneratorExecuting.
+void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
+  Node* generator_reg = __ BytecodeOperandReg(0);
+  Node* generator = __ LoadRegister(generator_reg);
+
+  __ ImportRegisterFile(
+      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));
+
+  Node* old_state =
+      __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
+  Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting);
+  __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
+      __ SmiTag(new_state));
+  __ SetAccumulator(old_state);
+
+  __ Dispatch();
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index ea50faa..d774d8b 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -21,6 +21,10 @@
 class Callable;
 class CompilationInfo;
 
+namespace compiler {
+class Node;
+}  // namespace compiler
+
 namespace interpreter {
 
 class InterpreterAssembler;
@@ -49,10 +53,16 @@
   void TraceCodegen(Handle<Code> code);
   const char* LookupNameOfBytecodeHandler(Code* code);
 
+  Local<v8::Object> GetDispatchCountersObject();
+
   Address dispatch_table_address() {
     return reinterpret_cast<Address>(&dispatch_table_[0]);
   }
 
+  Address bytecode_dispatch_counters_table() {
+    return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
+  }
+
  private:
 // Bytecode handler generator functions.
 #define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
@@ -60,16 +70,20 @@
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
-  // Generates code to perform the binary operations via |callable|.
+  // Generates code to perform the binary operation via |callable|.
   void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
 
-  // Generates code to perform the binary operations via |function_id|.
+  // Generates code to perform the binary operation via |function_id|.
   void DoBinaryOp(Runtime::FunctionId function_id,
                   InterpreterAssembler* assembler);
 
-  // Generates code to perform the count operations via |function_id|.
-  void DoCountOp(Runtime::FunctionId function_id,
-                 InterpreterAssembler* assembler);
+  // Generates code to perform the binary operation via |Generator|.
+  template <class Generator>
+  void DoBinaryOp(InterpreterAssembler* assembler);
+
+  // Generates code to perform the unary operation via |Generator|.
+  template <class Generator>
+  void DoUnaryOp(InterpreterAssembler* assembler);
 
   // Generates code to perform the comparison operation associated with
   // |compare_op|.
@@ -114,9 +128,8 @@
   // Generates code to perform a type conversion.
   void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
 
-  // Generates code ro create a literal via |function_id|.
-  void DoCreateLiteral(Runtime::FunctionId function_id,
-                       InterpreterAssembler* assembler);
+  // Generates code to perform logical-not on boolean |value|.
+  void DoLogicalNotOp(compiler::Node* value, InterpreterAssembler* assembler);
 
   // Generates code to perform delete via function_id.
   void DoDelete(Runtime::FunctionId function_id,
@@ -130,6 +143,8 @@
   void DoStoreLookupSlot(LanguageMode language_mode,
                          InterpreterAssembler* assembler);
 
+  uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
+
   // Get dispatch table index of bytecode.
   static size_t GetDispatchTableIndex(Bytecode bytecode,
                                       OperandScale operand_scale);
@@ -138,9 +153,11 @@
 
   static const int kNumberOfWideVariants = 3;
   static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
+  static const int kNumberOfBytecodes = static_cast<int>(Bytecode::kLast) + 1;
 
   Isolate* isolate_;
-  Code* dispatch_table_[kDispatchTableSize];
+  Address dispatch_table_[kDispatchTableSize];
+  v8::base::SmartArrayPointer<uintptr_t> bytecode_dispatch_counters_table_;
 
   DISALLOW_COPY_AND_ASSIGN(Interpreter);
 };
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
index 99a865b..65bfa20 100644
--- a/src/interpreter/source-position-table.cc
+++ b/src/interpreter/source-position-table.cc
@@ -115,53 +115,34 @@
 
 }  // namespace
 
-void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
-                                                      int source_position) {
+void SourcePositionTableBuilder::AddPosition(size_t bytecode_offset,
+                                             int source_position,
+                                             bool is_statement) {
   int offset = static_cast<int>(bytecode_offset);
-  AddEntry({offset, source_position, true});
-}
-
-void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
-                                                       int source_position) {
-  int offset = static_cast<int>(bytecode_offset);
-  AddEntry({offset, source_position, false});
+  AddEntry({offset, source_position, is_statement});
 }
 
 void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
-  // Don't encode a new entry if this bytecode already has a source position
-  // assigned.
-  if (candidate_.bytecode_offset == entry.bytecode_offset) {
-    if (entry.is_statement) candidate_ = entry;
-    return;
-  }
-
-  CommitEntry();
-  candidate_ = entry;
-}
-
-void SourcePositionTableBuilder::CommitEntry() {
-  if (candidate_.bytecode_offset == kUninitializedCandidateOffset) return;
-  PositionTableEntry tmp(candidate_);
+  PositionTableEntry tmp(entry);
   SubtractFromEntry(tmp, previous_);
   EncodeEntry(bytes_, tmp);
-  previous_ = candidate_;
+  previous_ = entry;
 
-  if (candidate_.is_statement) {
+  if (entry.is_statement) {
     LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddStatementPositionEvent(
-                                 jit_handler_data_, candidate_.bytecode_offset,
-                                 candidate_.source_position));
+                                 jit_handler_data_, entry.bytecode_offset,
+                                 entry.source_position));
   }
   LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddPositionEvent(
-                               jit_handler_data_, candidate_.bytecode_offset,
-                               candidate_.source_position));
+                               jit_handler_data_, entry.bytecode_offset,
+                               entry.source_position));
 
 #ifdef ENABLE_SLOW_DCHECKS
-  raw_entries_.push_back(candidate_);
+  raw_entries_.push_back(entry);
 #endif
 }
 
 Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
-  CommitEntry();
   if (bytes_.empty()) return isolate_->factory()->empty_byte_array();
 
   Handle<ByteArray> table = isolate_->factory()->NewByteArray(
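
Call sites migrate from the two removed entry points to the single
AddPosition, passing the statement/expression distinction as a flag. An
illustrative caller (function name hypothetical):

    // Records both kinds of positions through the unified entry point.
    void RecordPositions(SourcePositionTableBuilder* builder, size_t offset,
                         int pos) {
      builder->AddPosition(offset, pos, /*is_statement=*/true);   // statement
      builder->AddPosition(offset, pos, /*is_statement=*/false);  // expression
    }
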
diff --git a/src/interpreter/source-position-table.h b/src/interpreter/source-position-table.h
index 3ac58d6..220ef39 100644
--- a/src/interpreter/source-position-table.h
+++ b/src/interpreter/source-position-table.h
@@ -34,7 +34,7 @@
   bool is_statement;
 };
 
-class SourcePositionTableBuilder : public PositionsRecorder {
+class SourcePositionTableBuilder final : public PositionsRecorder {
  public:
   SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
       : isolate_(isolate),
@@ -42,16 +42,14 @@
 #ifdef ENABLE_SLOW_DCHECKS
         raw_entries_(zone),
 #endif
-        candidate_(kUninitializedCandidateOffset, 0, false) {
+        previous_() {
   }
 
-  void AddStatementPosition(size_t bytecode_offset, int source_position);
-  void AddExpressionPosition(size_t bytecode_offset, int source_position);
+  void AddPosition(size_t bytecode_offset, int source_position,
+                   bool is_statement);
   Handle<ByteArray> ToSourcePositionTable();
 
  private:
-  static const int kUninitializedCandidateOffset = -1;
-
   void AddEntry(const PositionTableEntry& entry);
   void CommitEntry();
 
@@ -60,7 +58,6 @@
 #ifdef ENABLE_SLOW_DCHECKS
   ZoneVector<PositionTableEntry> raw_entries_;
 #endif
-  PositionTableEntry candidate_;  // Next entry to be written, if initialized.
   PositionTableEntry previous_;   // Previously written entry, to compute delta.
 };
 
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index da36f76..48ea0aa 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -73,7 +73,11 @@
 
 
 Handle<JSGlobalObject> Isolate::global_object() {
-  return Handle<JSGlobalObject>(context()->global_object(), this);
+  return handle(context()->global_object(), this);
+}
+
+Handle<JSObject> Isolate::global_proxy() {
+  return handle(context()->global_proxy(), this);
 }
 
 
@@ -111,11 +115,17 @@
   // done here. Instead, there are mjsunit tests harmony/array-species* which
   // ensure that behavior is correct in various invalid protector cases.
 
-  PropertyCell* species_cell = heap()->species_protector();
+  Cell* species_cell = heap()->species_protector();
   return species_cell->value()->IsSmi() &&
          Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
 }
 
+bool Isolate::IsHasInstanceLookupChainIntact() {
+  if (!FLAG_harmony_instanceof) return true;
+  PropertyCell* has_instance_cell = heap()->has_instance_protector();
+  return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/isolate.cc b/src/isolate.cc
index c9f0111..9d35113 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -321,7 +321,6 @@
 // yet.
 static bool IsVisibleInStackTrace(JSFunction* fun,
                                   Object* caller,
-                                  Object* receiver,
                                   bool* seen_caller) {
   if ((fun == caller) && !(*seen_caller)) {
     *seen_caller = true;
@@ -397,9 +396,7 @@
           Handle<JSFunction> fun = frames[i].function();
           Handle<Object> recv = frames[i].receiver();
           // Filter out internal frames that we do not want to show.
-          if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) {
-            continue;
-          }
+          if (!IsVisibleInStackTrace(*fun, *caller, &seen_caller)) continue;
           // Filter out frames from other security contexts.
           if (!this->context()->HasSameSecurityTokenAs(fun->context())) {
             continue;
@@ -433,14 +430,13 @@
         Code* code = wasm_frame->unchecked_code();
         Handle<AbstractCode> abstract_code =
             Handle<AbstractCode>(AbstractCode::cast(code));
-        Handle<JSFunction> fun = factory()->NewFunction(
-            factory()->NewStringFromAsciiChecked("<WASM>"));
+        int offset =
+            static_cast<int>(wasm_frame->pc() - code->instruction_start());
         elements = MaybeGrow(this, elements, cursor, cursor + 4);
-        // TODO(jfb) Pass module object.
-        elements->set(cursor++, *factory()->undefined_value());
-        elements->set(cursor++, *fun);
+        elements->set(cursor++, wasm_frame->wasm_obj());
+        elements->set(cursor++, Smi::FromInt(wasm_frame->function_index()));
         elements->set(cursor++, *abstract_code);
-        elements->set(cursor++, Internals::IntToSmi(0));
+        elements->set(cursor++, Smi::FromInt(offset));
         frames_seen++;
       } break;
 
@@ -542,11 +538,16 @@
     }
   }
 
+  Handle<JSObject> NewStackFrameObject(FrameSummary& summ) {
+    int position = summ.abstract_code()->SourcePosition(summ.code_offset());
+    return NewStackFrameObject(summ.function(), position,
+                               summ.is_constructor());
+  }
+
   Handle<JSObject> NewStackFrameObject(Handle<JSFunction> fun, int position,
                                        bool is_constructor) {
     Handle<JSObject> stack_frame =
         factory()->NewJSObject(isolate_->object_function());
-
     Handle<Script> script(Script::cast(fun->shared()->script()));
 
     if (!line_key_.is_null()) {
@@ -556,12 +557,14 @@
       int relative_line_number = line_number - script_line_offset;
       if (!column_key_.is_null() && relative_line_number >= 0) {
         Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
-        int start = (relative_line_number == 0) ? 0 :
-            Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+        int start =
+            (relative_line_number == 0)
+                ? 0
+                : Smi::cast(line_ends->get(relative_line_number - 1))->value() +
+                      1;
         int column_offset = position - start;
         if (relative_line_number == 0) {
-          // For the case where the code is on the same line as the script
-          // tag.
+          // For the case where the code is on the same line as the script tag.
           column_offset += script->column_offset();
         }
         JSObject::AddProperty(stack_frame, column_key_,
@@ -589,22 +592,51 @@
                             NONE);
     }
 
-    if (!function_key_.is_null()) {
-      Handle<Object> fun_name = JSFunction::GetDebugName(fun);
-      JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
-    }
-
     if (!eval_key_.is_null()) {
       Handle<Object> is_eval = factory()->ToBoolean(
           script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
       JSObject::AddProperty(stack_frame, eval_key_, is_eval, NONE);
     }
 
+    if (!function_key_.is_null()) {
+      Handle<Object> fun_name = JSFunction::GetDebugName(fun);
+      JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
+    }
+
     if (!constructor_key_.is_null()) {
       Handle<Object> is_constructor_obj = factory()->ToBoolean(is_constructor);
       JSObject::AddProperty(stack_frame, constructor_key_, is_constructor_obj,
                             NONE);
     }
+    return stack_frame;
+  }
+
+  Handle<JSObject> NewStackFrameObject(WasmFrame* frame) {
+    Handle<JSObject> stack_frame =
+        factory()->NewJSObject(isolate_->object_function());
+
+    if (!function_key_.is_null()) {
+      Handle<Object> fun_name = handle(frame->function_name(), isolate_);
+      if (fun_name->IsUndefined())
+        fun_name = isolate_->factory()->InternalizeUtf8String(
+            Vector<const char>("<WASM>"));
+      JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
+    }
+    // Encode the function index as line number.
+    if (!line_key_.is_null()) {
+      JSObject::AddProperty(
+          stack_frame, line_key_,
+          isolate_->factory()->NewNumberFromInt(frame->function_index()), NONE);
+    }
+    // Encode the byte offset as column.
+    if (!column_key_.is_null()) {
+      Code* code = frame->LookupCode();
+      int offset = static_cast<int>(frame->pc() - code->instruction_start());
+      int position = code->SourcePosition(offset);
+      JSObject::AddProperty(stack_frame, column_key_,
+                            isolate_->factory()->NewNumberFromInt(position),
+                            NONE);
+    }
 
     return stack_frame;
   }
@@ -683,29 +715,34 @@
   // Ensure no negative values.
   int limit = Max(frame_limit, 0);
   Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
+  Handle<FixedArray> stack_trace_elems(
+      FixedArray::cast(stack_trace->elements()), this);
 
-  StackTraceFrameIterator it(this);
   int frames_seen = 0;
-  while (!it.done() && (frames_seen < limit)) {
-    JavaScriptFrame* frame = it.frame();
-    // Set initial size to the maximum inlining level + 1 for the outermost
-    // function.
-    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-    frame->Summarize(&frames);
-    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
-      Handle<JSFunction> fun = frames[i].function();
-      // Filter frames from other security contexts.
-      if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
-          !this->context()->HasSameSecurityTokenAs(fun->context())) continue;
-      int position =
-          frames[i].abstract_code()->SourcePosition(frames[i].code_offset());
-      Handle<JSObject> stack_frame =
-          helper.NewStackFrameObject(fun, position, frames[i].is_constructor());
-
-      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
+  for (StackTraceFrameIterator it(this); !it.done() && (frames_seen < limit);
+       it.Advance()) {
+    StandardFrame* frame = it.frame();
+    if (frame->is_java_script()) {
+      // Set initial size to the maximum inlining level + 1 for the outermost
+      // function.
+      List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+      JavaScriptFrame::cast(frame)->Summarize(&frames);
+      for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+        Handle<JSFunction> fun = frames[i].function();
+        // Filter frames from other security contexts.
+        if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
+            !this->context()->HasSameSecurityTokenAs(fun->context()))
+          continue;
+        Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(frames[i]);
+        stack_trace_elems->set(frames_seen, *new_frame_obj);
+        frames_seen++;
+      }
+    } else {
+      WasmFrame* wasm_frame = WasmFrame::cast(frame);
+      Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(wasm_frame);
+      stack_trace_elems->set(frames_seen, *new_frame_obj);
       frames_seen++;
     }
-    it.Advance();
   }
 
   stack_trace->set_length(Smi::FromInt(frames_seen));
@@ -1162,7 +1199,7 @@
         // position of the exception handler. The special builtin below will
         // take care of continuing to dispatch at that position. Also restore
         // the correct context for the handler from the interpreter register.
-        context = Context::cast(js_frame->GetInterpreterRegister(context_reg));
+        context = Context::cast(js_frame->ReadInterpreterRegister(context_reg));
         js_frame->PatchBytecodeOffset(static_cast<int>(offset));
         offset = 0;
 
@@ -1318,14 +1355,26 @@
   while (!it.done()) {
     HandleScope scope(this);
     // Find code position if recorded in relocation info.
-    JavaScriptFrame* frame = it.frame();
-    Code* code = frame->LookupCode();
-    int offset = static_cast<int>(frame->pc() - code->instruction_start());
-    int pos = frame->LookupCode()->SourcePosition(offset);
+    StandardFrame* frame = it.frame();
+    int pos;
+    if (frame->is_interpreted()) {
+      InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+      pos = iframe->GetBytecodeArray()->SourcePosition(
+          iframe->GetBytecodeOffset());
+    } else if (frame->is_java_script()) {
+      Code* code = frame->LookupCode();
+      int offset = static_cast<int>(frame->pc() - code->instruction_start());
+      pos = frame->LookupCode()->SourcePosition(offset);
+    } else {
+      DCHECK(frame->is_wasm());
+      // TODO(clemensh): include wasm frames here
+      continue;
+    }
+    JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
     Handle<Object> pos_obj(Smi::FromInt(pos), this);
     // Fetch function and receiver.
-    Handle<JSFunction> fun(frame->function());
-    Handle<Object> recv(frame->receiver(), this);
+    Handle<JSFunction> fun(js_frame->function());
+    Handle<Object> recv(js_frame->receiver(), this);
     // Advance to the next JavaScript frame and determine if the
     // current frame is the top-level frame.
     it.Advance();
@@ -1340,31 +1389,29 @@
   }
 }
 
-
 bool Isolate::ComputeLocation(MessageLocation* target) {
   StackTraceFrameIterator it(this);
-  if (!it.done()) {
-    JavaScriptFrame* frame = it.frame();
-    JSFunction* fun = frame->function();
-    Object* script = fun->shared()->script();
-    if (script->IsScript() &&
-        !(Script::cast(script)->source()->IsUndefined())) {
-      Handle<Script> casted_script(Script::cast(script));
-      // Compute the location from the function and the relocation info of the
-      // baseline code. For optimized code this will use the deoptimization
-      // information to get canonical location information.
-      List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-      it.frame()->Summarize(&frames);
-      FrameSummary& summary = frames.last();
-      int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
-      *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
-      return true;
-    }
+  if (it.done()) return false;
+  StandardFrame* frame = it.frame();
+  // TODO(clemensh): handle wasm frames
+  if (!frame->is_java_script()) return false;
+  JSFunction* fun = JavaScriptFrame::cast(frame)->function();
+  Object* script = fun->shared()->script();
+  if (!script->IsScript() || (Script::cast(script)->source()->IsUndefined())) {
+    return false;
   }
-  return false;
+  Handle<Script> casted_script(Script::cast(script));
+  // Compute the location from the function and the relocation info of the
+  // baseline code. For optimized code this will use the deoptimization
+  // information to get canonical location information.
+  List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+  JavaScriptFrame::cast(frame)->Summarize(&frames);
+  FrameSummary& summary = frames.last();
+  int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
+  *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
+  return true;
 }
 
-
 bool Isolate::ComputeLocationFromException(MessageLocation* target,
                                            Handle<Object> exception) {
   if (!exception->IsJSObject()) return false;
@@ -1405,8 +1452,12 @@
   int elements_limit = Smi::cast(simple_stack_trace->length())->value();
 
   for (int i = 1; i < elements_limit; i += 4) {
-    Handle<JSFunction> fun =
-        handle(JSFunction::cast(elements->get(i + 1)), this);
+    Handle<Object> fun_obj = handle(elements->get(i + 1), this);
+    if (fun_obj->IsSmi()) {
+      // TODO(clemensh): handle wasm frames
+      return false;
+    }
+    Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
     if (!fun->shared()->IsSubjectToDebugging()) continue;
 
     Object* script = fun->shared()->script();
@@ -1798,7 +1849,6 @@
       runtime_profiler_(NULL),
       compilation_cache_(NULL),
       counters_(NULL),
-      code_range_(NULL),
       logger_(NULL),
       stats_table_(NULL),
       stub_cache_(NULL),
@@ -1809,7 +1859,6 @@
       capture_stack_trace_for_uncaught_exceptions_(false),
       stack_trace_for_uncaught_exceptions_frame_limit_(0),
       stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
-      memory_allocator_(NULL),
       keyed_lookup_cache_(NULL),
       context_slot_cache_(NULL),
       descriptor_lookup_cache_(NULL),
@@ -1845,6 +1894,7 @@
 #if TRACE_MAPS
       next_unique_sfi_id_(0),
 #endif
+      is_running_microtasks_(false),
       use_counter_callback_(NULL),
       basic_block_profiler_(NULL),
       cancelable_task_manager_(new CancelableTaskManager()),
@@ -2067,10 +2117,6 @@
   delete thread_manager_;
   thread_manager_ = NULL;
 
-  delete memory_allocator_;
-  memory_allocator_ = NULL;
-  delete code_range_;
-  code_range_ = NULL;
   delete global_handles_;
   global_handles_ = NULL;
   delete eternal_handles_;
@@ -2164,9 +2210,6 @@
   // The initialization process does not handle memory exhaustion.
   AlwaysAllocateScope always_allocate(this);
 
-  memory_allocator_ = new MemoryAllocator(this);
-  code_range_ = new CodeRange(this);
-
   // Safe after setting Heap::isolate_, and initializing StackGuard
   heap_.SetStackLimits();
 
@@ -2225,7 +2268,7 @@
     return false;
   }
 
-  deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
+  deoptimizer_data_ = new DeoptimizerData(heap()->memory_allocator());
 
   const bool create_heap_objects = (des == NULL);
   if (create_heap_objects && !heap_.CreateHeapObjects()) {
@@ -2247,7 +2290,8 @@
     set_event_logger(Logger::DefaultEventLoggerSentinel);
   }
 
-  if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
+  if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs || FLAG_trace_turbo ||
+      FLAG_trace_turbo_graph) {
     PrintF("Concurrent recompilation has been disabled for tracing.\n");
   } else if (OptimizingCompileDispatcher::Enabled()) {
     optimizing_compile_dispatcher_ = new OptimizingCompileDispatcher(this);
@@ -2262,8 +2306,7 @@
     des->Deserialize(this);
   }
   stub_cache_->Initialize();
-
-  if (FLAG_ignition) {
+  if (FLAG_ignition || serializer_enabled()) {
     interpreter_->Initialize();
   }
 
@@ -2487,6 +2530,31 @@
          CpuFeatures::SupportsCrankshaft();
 }
 
+bool Isolate::IsArrayOrObjectPrototype(Object* object) {
+  Object* context = heap()->native_contexts_list();
+  while (context != heap()->undefined_value()) {
+    Context* current_context = Context::cast(context);
+    if (current_context->initial_object_prototype() == object ||
+        current_context->initial_array_prototype() == object) {
+      return true;
+    }
+    context = current_context->next_context_link();
+  }
+  return false;
+}
+
+bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
+  DisallowHeapAllocation no_gc;
+  Object* context = heap()->native_contexts_list();
+  while (context != heap()->undefined_value()) {
+    Context* current_context = Context::cast(context);
+    if (current_context->get(index) == object) {
+      return true;
+    }
+    context = current_context->next_context_link();
+  }
+  return false;
+}
 
 bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
   PropertyCell* no_elements_cell = heap()->array_protector();
@@ -2547,52 +2615,72 @@
   return cell_reports_intact;
 }
 
-void Isolate::InvalidateArraySpeciesProtector() {
-  if (!FLAG_harmony_species) return;
-  DCHECK(factory()->species_protector()->value()->IsSmi());
-  DCHECK(IsArraySpeciesLookupChainIntact());
-  PropertyCell::SetValueWithInvalidation(
-      factory()->species_protector(),
-      handle(Smi::FromInt(kArrayProtectorInvalid), this));
-  DCHECK(!IsArraySpeciesLookupChainIntact());
+bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
+  Cell* is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
+  bool is_is_concat_spreadable_set =
+      Smi::cast(is_concat_spreadable_cell->value())->value() ==
+      kArrayProtectorInvalid;
+#ifdef DEBUG
+  Map* root_array_map = get_initial_js_array_map(GetInitialFastElementsKind());
+  if (root_array_map == NULL) {
+    // Ignore the value of is_concat_spreadable during bootstrap.
+    return !is_is_concat_spreadable_set;
+  }
+  Handle<Object> array_prototype(array_function()->prototype(), this);
+  Handle<Symbol> key = factory()->is_concat_spreadable_symbol();
+  Handle<Object> value;
+  LookupIterator it(array_prototype, key);
+  if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined()) {
+    // TODO(cbruni): Currently we do not revert if we unset the
+    // @@isConcatSpreadable property on Array.prototype or Object.prototype
+    // hence the reverse implication doesn't hold.
+    DCHECK(is_is_concat_spreadable_set);
+    return false;
+  }
+#endif  // DEBUG
+
+  return !is_is_concat_spreadable_set;
 }
 
 void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
   DisallowHeapAllocation no_gc;
-  if (IsFastArrayConstructorPrototypeChainIntact() &&
-      object->map()->is_prototype_map()) {
-    Object* context = heap()->native_contexts_list();
-    while (!context->IsUndefined()) {
-      Context* current_context = Context::cast(context);
-      if (current_context->get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ==
-              *object ||
-          current_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ==
-              *object) {
-        CountUsage(v8::Isolate::UseCounterFeature::kArrayProtectorDirtied);
-        PropertyCell::SetValueWithInvalidation(
-            factory()->array_protector(),
-            handle(Smi::FromInt(kArrayProtectorInvalid), this));
-        break;
-      }
-      context = current_context->get(Context::NEXT_CONTEXT_LINK);
-    }
-  }
+  if (!object->map()->is_prototype_map()) return;
+  if (!IsFastArrayConstructorPrototypeChainIntact()) return;
+  if (!IsArrayOrObjectPrototype(*object)) return;
+  PropertyCell::SetValueWithInvalidation(
+      factory()->array_protector(),
+      handle(Smi::FromInt(kArrayProtectorInvalid), this));
 }
 
+void Isolate::InvalidateHasInstanceProtector() {
+  DCHECK(factory()->has_instance_protector()->value()->IsSmi());
+  DCHECK(IsHasInstanceLookupChainIntact());
+  PropertyCell::SetValueWithInvalidation(
+      factory()->has_instance_protector(),
+      handle(Smi::FromInt(kArrayProtectorInvalid), this));
+  DCHECK(!IsHasInstanceLookupChainIntact());
+}
+
+void Isolate::InvalidateIsConcatSpreadableProtector() {
+  DCHECK(factory()->is_concat_spreadable_protector()->value()->IsSmi());
+  DCHECK(IsIsConcatSpreadableLookupChainIntact());
+  factory()->is_concat_spreadable_protector()->set_value(
+      Smi::FromInt(kArrayProtectorInvalid));
+  DCHECK(!IsIsConcatSpreadableLookupChainIntact());
+}
+
+void Isolate::InvalidateArraySpeciesProtector() {
+  if (!FLAG_harmony_species) return;
+  DCHECK(factory()->species_protector()->value()->IsSmi());
+  DCHECK(IsArraySpeciesLookupChainIntact());
+  factory()->species_protector()->set_value(
+      Smi::FromInt(kArrayProtectorInvalid));
+  DCHECK(!IsArraySpeciesLookupChainIntact());
+}
 
 bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
-  if (array->map()->is_prototype_map()) {
-    Object* context = heap()->native_contexts_list();
-    while (!context->IsUndefined()) {
-      Context* current_context = Context::cast(context);
-      if (current_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ==
-          *array) {
-        return true;
-      }
-      context = current_context->get(Context::NEXT_CONTEXT_LINK);
-    }
-  }
-  return false;
+  DisallowHeapAllocation no_gc;
+  return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
 }
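
IsIsConcatSpreadableLookupChainIntact and the invalidation helpers above follow V8's protector-cell pattern: a Smi-valued cell stays at the valid sentinel until script performs a mutation the fast paths cannot tolerate. A behavioral sketch from the JS side (exactly which mutation flips which cell is version-specific):

    // Intact protector: concat can assume plain arrays are spreadable
    // without looking up @@isConcatSpreadable on each element.
    console.log([0].concat([1, 2]));          // [0, 1, 2]

    // Defining @@isConcatSpreadable is the kind of mutation the DEBUG block
    // in IsIsConcatSpreadableLookupChainIntact probes for; afterwards concat
    // must consult the symbol every time.
    var arr = [1, 2];
    arr[Symbol.isConcatSpreadable] = false;
    console.log([0].concat(arr));             // [0, [1, 2]]
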
 
 
@@ -2758,7 +2846,9 @@
   // Increase call depth to prevent recursive callbacks.
   v8::Isolate::SuppressMicrotaskExecutionScope suppress(
       reinterpret_cast<v8::Isolate*>(this));
+  is_running_microtasks_ = true;
   RunMicrotasksInternal();
+  is_running_microtasks_ = false;
   FireMicrotasksCompletedCallback();
 }
 
@@ -2917,7 +3007,7 @@
       DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
       WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
       if (mark_sweeps > 3) {
-        PrintF("detached context 0x%p\n survived %d GCs (leak?)\n",
+        PrintF("detached context %p\n survived %d GCs (leak?)\n",
                static_cast<void*>(cell->value()), mark_sweeps);
       }
     }
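
is_running_microtasks_ (exposed below as Isolate::IsRunningMicrotasks) lets the embedder tell whether it is being re-entered from inside the microtask pump. From script, the pump is simply the Promise-job queue; a sketch:

    // Each .then callback runs inside RunMicrotasksInternal, i.e. while
    // is_running_microtasks_ is true on the C++ side.
    Promise.resolve('job 1').then(function(v) {
      console.log(v);
      // Enqueuing from a microtask appends to the same checkpoint:
      Promise.resolve('job 2').then(function(w) { console.log(w); });
    });
    console.log('sync');   // logs: sync, job 1, job 2
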
diff --git a/src/isolate.h b/src/isolate.h
index 8847164..5895ebb 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -58,7 +58,6 @@
 class ExternalCallbackScope;
 class ExternalReferenceTable;
 class Factory;
-class FunctionInfoListener;
 class HandleScopeImplementer;
 class HeapProfiler;
 class HStatistics;
@@ -383,8 +382,6 @@
   /* function cache of the native context. */                                  \
   V(int, next_serial_number, 0)                                                \
   V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL)  \
-  /* Part of the state of liveedit. */                                         \
-  V(FunctionInfoListener*, active_function_info_listener, NULL)                \
   /* State for Relocatable. */                                                 \
   V(Relocatable*, relocatable_top, NULL)                                       \
   V(DebugObjectCache*, string_stream_debug_object_cache, NULL)                 \
@@ -495,14 +492,6 @@
     return isolate;
   }
 
-  // Like Current, but skips the check that |isolate_key_| was initialized.
-  // Callers have to ensure that themselves.
-  // DO NOT USE. The only remaining callsite will be deleted soon.
-  INLINE(static Isolate* UnsafeCurrent()) {
-    return reinterpret_cast<Isolate*>(
-        base::Thread::GetThreadLocal(isolate_key_));
-  }
-
   // Usually called by Init(), but can be called early e.g. to allow
   // testing components that require logging but not the whole
   // isolate.
@@ -633,9 +622,7 @@
   inline Handle<JSGlobalObject> global_object();
 
   // Returns the global proxy object of the current context.
-  JSObject* global_proxy() {
-    return context()->global_proxy();
-  }
+  inline Handle<JSObject> global_proxy();
 
   static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
   void FreeThreadResources() { thread_local_top_.Free(); }
@@ -701,7 +688,7 @@
   void ReportFailedAccessCheck(Handle<JSObject> receiver);
 
   // Exception throwing support. The caller should use the result
-  // of Throw() as its return value.
+  // of Throw() as its return value.
   Object* Throw(Object* exception, MessageLocation* location = NULL);
   Object* ThrowIllegalOperation();
 
@@ -817,7 +804,6 @@
     DCHECK(counters_ != NULL);
     return counters_;
   }
-  CodeRange* code_range() { return code_range_; }
   RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
   CompilationCache* compilation_cache() { return compilation_cache_; }
   Logger* logger() {
@@ -841,10 +827,6 @@
     return materialized_object_store_;
   }
 
-  MemoryAllocator* memory_allocator() {
-    return memory_allocator_;
-  }
-
   KeyedLookupCache* keyed_lookup_cache() {
     return keyed_lookup_cache_;
   }
@@ -974,6 +956,8 @@
 
   bool IsFastArrayConstructorPrototypeChainIntact();
   inline bool IsArraySpeciesLookupChainIntact();
+  inline bool IsHasInstanceLookupChainIntact();
+  bool IsIsConcatSpreadableLookupChainIntact();
 
   // On intent to set an element in object, make sure that appropriate
   // notifications occur if the set is on the elements of the array or
@@ -990,6 +974,8 @@
     UpdateArrayProtectorOnSetElement(object);
   }
   void InvalidateArraySpeciesProtector();
+  void InvalidateHasInstanceProtector();
+  void InvalidateIsConcatSpreadableProtector();
 
   // Returns true if array is the initial array prototype in any native context.
   bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1077,6 +1063,7 @@
 
   void EnqueueMicrotask(Handle<Object> microtask);
   void RunMicrotasks();
+  bool IsRunningMicrotasks() const { return is_running_microtasks_; }
 
   void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
   void CountUsage(v8::Isolate::UseCounterFeature feature);
@@ -1121,8 +1108,11 @@
 
   base::AccountingAllocator* allocator() { return &allocator_; }
 
+  bool IsInAnyContext(Object* object, uint32_t index);
+
  protected:
   explicit Isolate(bool enable_serializer);
+  bool IsArrayOrObjectPrototype(Object* object);
 
  private:
   friend struct GlobalState;
@@ -1240,7 +1230,6 @@
   RuntimeProfiler* runtime_profiler_;
   CompilationCache* compilation_cache_;
   Counters* counters_;
-  CodeRange* code_range_;
   base::RecursiveMutex break_access_;
   Logger* logger_;
   StackGuard stack_guard_;
@@ -1254,7 +1243,6 @@
   bool capture_stack_trace_for_uncaught_exceptions_;
   int stack_trace_for_uncaught_exceptions_frame_limit_;
   StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
-  MemoryAllocator* memory_allocator_;
   KeyedLookupCache* keyed_lookup_cache_;
   ContextSlotCache* context_slot_cache_;
   DescriptorLookupCache* descriptor_lookup_cache_;
@@ -1358,6 +1346,7 @@
 
   // List of callbacks after microtasks were run.
   List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
+  bool is_running_microtasks_;
 
   v8::Isolate::UseCounterCallback use_counter_callback_;
   BasicBlockProfiler* basic_block_profiler_;
@@ -1381,8 +1370,6 @@
   friend class Simulator;
   friend class StackGuard;
   friend class ThreadId;
-  friend class TestMemoryAllocatorScope;
-  friend class TestCodeRangeScope;
   friend class v8::Isolate;
   friend class v8::Locker;
   friend class v8::Unlocker;
@@ -1499,6 +1486,15 @@
   Isolate* isolate_;
 };
 
+#define STACK_CHECK(isolate, result_value)               \
+  do {                                                   \
+    StackLimitCheck stack_check(isolate);                \
+    if (stack_check.HasOverflowed()) {                   \
+      isolate->Throw(*isolate->factory()->NewRangeError( \
+          MessageTemplate::kStackOverflow));             \
+      return result_value;                               \
+    }                                                    \
+  } while (false)
 
 // Support for temporarily postponing interrupts. When the outermost
 // postpone scope is left the interrupts will be re-enabled and any
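
The STACK_CHECK macro gives runtime functions a uniform early-out: probe the stack limit, throw the kStackOverflow RangeError, and return result_value to the caller. From script the guarded paths surface as the familiar recursion error; a minimal sketch:

    function recurse() { return recurse() + 1; }
    try {
      recurse();                            // eventually trips the stack limit
    } catch (e) {
      // V8 materializes MessageTemplate::kStackOverflow as a RangeError
      // ("Maximum call stack size exceeded").
      console.log(e instanceof RangeError); // true
    }
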
diff --git a/src/js/array.js b/src/js/array.js
index 1406df3..0a77b23 100644
--- a/src/js/array.js
+++ b/src/js/array.js
@@ -11,7 +11,6 @@
 // -------------------------------------------------------------------
 // Imports
 
-var AddIndexedProperty;
 var FLAG_harmony_species;
 var GetIterator;
 var GetMethod;
@@ -21,27 +20,18 @@
 var MakeTypeError;
 var MaxSimple;
 var MinSimple;
-var ObjectDefineProperty;
 var ObjectHasOwnProperty;
 var ObjectToString = utils.ImportNow("object_to_string");
-var ObserveBeginPerformSplice;
-var ObserveEndPerformSplice;
-var ObserveEnqueueSpliceRecord;
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
 var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
 
 utils.Import(function(from) {
-  AddIndexedProperty = from.AddIndexedProperty;
   GetIterator = from.GetIterator;
   GetMethod = from.GetMethod;
   MakeTypeError = from.MakeTypeError;
   MaxSimple = from.MaxSimple;
   MinSimple = from.MinSimple;
-  ObjectDefineProperty = from.ObjectDefineProperty;
   ObjectHasOwnProperty = from.ObjectHasOwnProperty;
-  ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
-  ObserveEndPerformSplice = from.ObserveEndPerformSplice;
-  ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
 });
 
 utils.ImportFromExperimental(function(from) {
@@ -53,6 +43,9 @@
 
 function ArraySpeciesCreate(array, length) {
   var constructor;
+
+  length = INVERT_NEG_ZERO(length);
+
   if (FLAG_harmony_species) {
     constructor = %ArraySpeciesConstructor(array);
   } else {
@@ -62,17 +55,6 @@
 }
 
 
-function DefineIndexedProperty(array, i, value) {
-  if (FLAG_harmony_species) {
-    var result = ObjectDefineProperty(array, i, {
-      value: value, writable: true, configurable: true, enumerable: true
-    });
-    if (!result) throw MakeTypeError(kStrictCannotAssign, i);
-  } else {
-    AddIndexedProperty(array, i, value);
-  }
-}
-
 function KeySortCompare(a, b) {
   return a - b;
 }
@@ -123,8 +105,7 @@
   // Only use the sparse variant on arrays that are likely to be sparse and the
   // number of elements touched in the operation is relatively small compared to
   // the overall size of the array.
-  if (!is_array || length < 1000 || %IsObserved(array) ||
-      %HasComplexElements(array)) {
+  if (!is_array || length < 1000 || %HasComplexElements(array)) {
     return false;
   }
   if (!%_IsSmi(length)) {
@@ -265,7 +246,7 @@
     for (var i = start_i; i < limit; ++i) {
       var current = array[i];
       if (!IS_UNDEFINED(current) || i in array) {
-        DefineIndexedProperty(deleted_elements, i - start_i, current);
+        %CreateDataProperty(deleted_elements, i - start_i, current);
       }
     }
   } else {
@@ -275,7 +256,7 @@
       if (key >= start_i) {
         var current = array[key];
         if (!IS_UNDEFINED(current) || key in array) {
-          DefineIndexedProperty(deleted_elements, key - start_i, current);
+          %CreateDataProperty(deleted_elements, key - start_i, current);
         }
       }
     }
@@ -352,7 +333,7 @@
     var index = start_i + i;
     if (HAS_INDEX(array, index, is_array)) {
       var current = array[index];
-      DefineIndexedProperty(deleted_elements, i, current);
+      %CreateDataProperty(deleted_elements, i, current);
     }
   }
 }
@@ -456,23 +437,6 @@
 }
 
 
-function ObservedArrayPop(n) {
-  n--;
-  var value = this[n];
-
-  try {
-    ObserveBeginPerformSplice(this);
-    delete this[n];
-    this.length = n;
-  } finally {
-    ObserveEndPerformSplice(this);
-    ObserveEnqueueSpliceRecord(this, n, [value], 0);
-  }
-
-  return value;
-}
-
-
 // Removes the last element from the array and returns it. See
 // ECMA-262, section 15.4.4.6.
 function ArrayPop() {
@@ -485,9 +449,6 @@
     return;
   }
 
-  if (%IsObserved(array))
-    return ObservedArrayPop.call(array, n);
-
   n--;
   var value = array[n];
   %DeleteProperty_Strict(array, n);
@@ -496,46 +457,19 @@
 }
 
 
-function ObservedArrayPush() {
-  var n = TO_LENGTH(this.length);
-  var m = arguments.length;
-
-  try {
-    ObserveBeginPerformSplice(this);
-    for (var i = 0; i < m; i++) {
-      this[i+n] = arguments[i];
-    }
-    var new_length = n + m;
-    this.length = new_length;
-  } finally {
-    ObserveEndPerformSplice(this);
-    ObserveEnqueueSpliceRecord(this, n, [], m);
-  }
-
-  return new_length;
-}
-
-
 // Appends the arguments to the end of the array and returns the new
 // length of the array. See ECMA-262, section 15.4.4.7.
 function ArrayPush() {
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.push");
 
-  if (%IsObserved(this))
-    return ObservedArrayPush.apply(this, arguments);
-
   var array = TO_OBJECT(this);
   var n = TO_LENGTH(array.length);
   var m = arguments.length;
 
-  // It appears that there is no enforced, absolute limit on the number of
-  // arguments, but it would surely blow the stack to use 2**30 or more.
-  // To avoid integer overflow, do the comparison to the max safe integer
-  // after subtracting 2**30 from both sides. (2**31 would seem like a
-  // natural value, but it is negative in JS, and 2**32 is 1.)
-  if (m > (1 << 30) || (n - (1 << 30)) + m > kMaxSafeInteger - (1 << 30)) {
-    throw MakeTypeError(kPushPastSafeLength, m, n);
-  }
+  // Subtract n from kMaxSafeInteger rather than testing m + n >
+  // kMaxSafeInteger. n may already be kMaxSafeInteger. In that case adding
+  // e.g., 1 would not be safe.
+  if (m > kMaxSafeInteger - n) throw MakeTypeError(kPushPastSafeLength, m, n);
 
   for (var i = 0; i < m; i++) {
     array[i+n] = arguments[i];
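
A worked instance of the edge case the rewritten comment describes, assuming nothing beyond IEEE-754 double arithmetic:

    var kMaxSafeInteger = Math.pow(2, 53) - 1;   // 9007199254740991
    var n = kMaxSafeInteger;                     // length already at the limit
    var m = 2;                                   // pushing two more elements

    // 2**53 + 1 is not representable as a double, so the naive sum rounds:
    console.log(m + n === Math.pow(2, 53));      // true: ...993 rounds to 2**53

    // The subtraction never leaves the exact-integer range:
    console.log(kMaxSafeInteger - n);            // 0
    console.log(m > kMaxSafeInteger - n);        // true: push must throw
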
@@ -646,22 +580,6 @@
 }
 
 
-function ObservedArrayShift(len) {
-  var first = this[0];
-
-  try {
-    ObserveBeginPerformSplice(this);
-    SimpleMove(this, 0, 1, len, 0);
-    this.length = len - 1;
-  } finally {
-    ObserveEndPerformSplice(this);
-    ObserveEnqueueSpliceRecord(this, 0, [first], 0);
-  }
-
-  return first;
-}
-
-
 function ArrayShift() {
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
 
@@ -675,9 +593,6 @@
 
   if (%object_is_sealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
 
-  if (%IsObserved(array))
-    return ObservedArrayShift.call(array, len);
-
   var first = array[0];
 
   if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
@@ -692,33 +607,9 @@
 }
 
 
-function ObservedArrayUnshift() {
-  var len = TO_LENGTH(this.length);
-  var num_arguments = arguments.length;
-
-  try {
-    ObserveBeginPerformSplice(this);
-    SimpleMove(this, 0, 0, len, num_arguments);
-    for (var i = 0; i < num_arguments; i++) {
-      this[i] = arguments[i];
-    }
-    var new_length = len + num_arguments;
-    this.length = new_length;
-  } finally {
-    ObserveEndPerformSplice(this);
-    ObserveEnqueueSpliceRecord(this, 0, [], num_arguments);
-  }
-
-  return new_length;
-}
-
-
 function ArrayUnshift(arg1) {  // length == 1
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift");
 
-  if (%IsObserved(this))
-    return ObservedArrayUnshift.apply(this, arguments);
-
   var array = TO_OBJECT(this);
   var len = TO_LENGTH(array.length);
   var num_arguments = arguments.length;
@@ -813,53 +704,9 @@
 }
 
 
-function ObservedArraySplice(start, delete_count) {
-  var num_arguments = arguments.length;
-  var len = TO_LENGTH(this.length);
-  var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
-  var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
-                                           start_i);
-  var deleted_elements = [];
-  deleted_elements.length = del_count;
-  var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
-
-  try {
-    ObserveBeginPerformSplice(this);
-
-    SimpleSlice(this, start_i, del_count, len, deleted_elements);
-    SimpleMove(this, start_i, del_count, len, num_elements_to_add);
-
-    // Insert the arguments into the resulting array in
-    // place of the deleted elements.
-    var i = start_i;
-    var arguments_index = 2;
-    var arguments_length = arguments.length;
-    while (arguments_index < arguments_length) {
-      this[i++] = arguments[arguments_index++];
-    }
-    this.length = len - del_count + num_elements_to_add;
-
-  } finally {
-    ObserveEndPerformSplice(this);
-    if (deleted_elements.length || num_elements_to_add) {
-      ObserveEnqueueSpliceRecord(this,
-                                 start_i,
-                                 deleted_elements.slice(),
-                                 num_elements_to_add);
-    }
-  }
-
-  // Return the deleted elements.
-  return deleted_elements;
-}
-
-
 function ArraySplice(start, delete_count) {
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.splice");
 
-  if (%IsObserved(this))
-    return ObservedArraySplice.apply(this, arguments);
-
   var num_arguments = arguments.length;
   var array = TO_OBJECT(this);
   var len = TO_LENGTH(array.length);
@@ -1048,7 +895,8 @@
   // of a prototype property.
   var CopyFromPrototype = function CopyFromPrototype(obj, length) {
     var max = 0;
-    for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
+    for (var proto = %object_get_prototype_of(obj); proto;
+         proto = %object_get_prototype_of(proto)) {
       var indices = IS_PROXY(proto) ? length : %GetArrayKeys(proto, length);
       if (IS_NUMBER(indices)) {
         // It's an interval.
@@ -1076,7 +924,8 @@
   // where a prototype of obj has an element. I.e., shadow all prototype
   // elements in that range.
   var ShadowPrototypeElements = function(obj, from, to) {
-    for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
+    for (var proto = %object_get_prototype_of(obj); proto;
+         proto = %object_get_prototype_of(proto)) {
       var indices = IS_PROXY(proto) ? to : %GetArrayKeys(proto, to);
       if (IS_NUMBER(indices)) {
         // It's an interval.
@@ -1143,7 +992,7 @@
     }
     for (i = length - num_holes; i < length; i++) {
      // For compatibility with Webkit, do not expose elements in the prototype.
-      if (i in %_GetPrototype(obj)) {
+      if (i in %object_get_prototype_of(obj)) {
         obj[i] = UNDEFINED;
       } else {
         delete obj[i];
@@ -1174,9 +1023,9 @@
   var num_non_undefined = %RemoveArrayHoles(array, length);
 
   if (num_non_undefined == -1) {
-    // The array is observed, or there were indexed accessors in the array.
+    // There were indexed accessors in the array.
     // Move array holes and undefineds to the end using a Javascript function
-    // that is safe in the presence of accessors and is observable.
+    // that is safe in the presence of accessors.
     num_non_undefined = SafeRemoveArrayHoles(array);
   }
 
@@ -1211,7 +1060,7 @@
     if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       if (%_Call(f, receiver, element, i, array)) {
-        DefineIndexedProperty(result, result_length, element);
+        %CreateDataProperty(result, result_length, element);
         result_length++;
       }
     }
@@ -1331,7 +1180,7 @@
   for (var i = 0; i < length; i++) {
     if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
-      DefineIndexedProperty(result, i, %_Call(f, receiver, element, i, array));
+      %CreateDataProperty(result, i, %_Call(f, receiver, element, i, array));
     }
   }
   return result;
@@ -1347,7 +1196,7 @@
   if (IS_UNDEFINED(index)) {
     index = 0;
   } else {
-    index = TO_INTEGER(index) + 0;  // Add 0 to convert -0 to 0
+    index = INVERT_NEG_ZERO(TO_INTEGER(index));
     // If index is negative, index from the end of the array.
     if (index < 0) {
       index = length + index;
@@ -1409,7 +1258,7 @@
   if (argumentsLength < 2) {
     index = length - 1;
   } else {
-    index = TO_INTEGER(index) + 0;  // Add 0 to convert -0 to 0
+    index = INVERT_NEG_ZERO(TO_INTEGER(index));
     // If index is negative, index from end of the array.
     if (index < 0) index += length;
     // If index is still negative, do not search the array.
@@ -1736,17 +1585,6 @@
 }
 
 
-function AddArrayElement(constructor, array, i, value) {
-  if (constructor === GlobalArray) {
-    AddIndexedProperty(array, i, value);
-  } else {
-    ObjectDefineProperty(array, i, {
-      value: value, writable: true, configurable: true, enumerable: true
-    });
-  }
-}
-
-
 // ES6, draft 10-14-14, section 22.1.2.1
 function ArrayFrom(arrayLike, mapfn, receiver) {
   var items = TO_OBJECT(arrayLike);
@@ -1775,7 +1613,7 @@
       } else {
         mappedValue = nextValue;
       }
-      AddArrayElement(this, result, k, mappedValue);
+      %CreateDataProperty(result, k, mappedValue);
       k++;
     }
     result.length = k;
@@ -1791,7 +1629,7 @@
       } else {
         mappedValue = nextValue;
       }
-      AddArrayElement(this, result, k, mappedValue);
+      %CreateDataProperty(result, k, mappedValue);
     }
 
     result.length = k;
@@ -1807,7 +1645,7 @@
   // TODO: Implement IsConstructor (ES6 section 7.2.5)
   var array = %IsConstructor(constructor) ? new constructor(length) : [];
   for (var i = 0; i < length; i++) {
-    AddArrayElement(constructor, array, i, args[i]);
+    %CreateDataProperty(array, i, args[i]);
   }
   array.length = length;
   return array;
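
The replacement of DefineIndexedProperty/AddArrayElement with %CreateDataProperty matches the spec's CreateDataPropertyOrThrow: results of map, filter, splice, and Array.of are populated by defining own data properties, never by [[Set]], so accessors installed on Array.prototype cannot intercept them. A user-level sketch:

    // A poisoned setter on Array.prototype must not fire while map builds
    // its result, because elements of the result are *defined*, not assigned.
    Object.defineProperty(Array.prototype, '0', {
      configurable: true,
      set: function() { throw new Error('setter must not run'); }
    });
    var out = [10, 20].map(function(x) { return x + 1; });
    console.log(out[0]);          // 11, and no exception was thrown
    delete Array.prototype['0'];  // clean up
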
diff --git a/src/js/arraybuffer.js b/src/js/arraybuffer.js
index f0273c7..e739960 100644
--- a/src/js/arraybuffer.js
+++ b/src/js/arraybuffer.js
@@ -70,7 +70,9 @@
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         'ArrayBuffer.prototype.slice', result);
   }
-  // TODO(littledan): Check for a detached ArrayBuffer
+  // Checks for detached source/target ArrayBuffers are done inside of
+  // %ArrayBufferSliceImpl; the reordering of checks does not violate
+  // the spec because all exceptions thrown are TypeErrors.
   if (result === this) {
     throw MakeTypeError(kArrayBufferSpeciesThis);
   }
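
The result === this guard above can be exercised directly: a @@species constructor that hands back the receiver must be rejected, since slicing into the source buffer would alias it. A sketch (the species function is deliberately pathological):

    var buf = new ArrayBuffer(8);
    buf.constructor = {
      [Symbol.species]: function() { return buf; }  // "constructs" the receiver
    };
    try {
      buf.slice(0, 4);
    } catch (e) {
      console.log(e instanceof TypeError);  // true: kArrayBufferSpeciesThis
    }
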
diff --git a/src/js/generator.js b/src/js/generator.js
deleted file mode 100644
index 3dcdcc0..0000000
--- a/src/js/generator.js
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GeneratorFunctionPrototype = utils.ImportNow("GeneratorFunctionPrototype");
-var GeneratorFunction = utils.ImportNow("GeneratorFunction");
-var GlobalFunction = global.Function;
-var MakeTypeError;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-utils.Import(function(from) {
-  MakeTypeError = from.MakeTypeError;
-});
-
-// ----------------------------------------------------------------------------
-
-// Generator functions and objects are specified by ES6, sections 15.19.3 and
-// 15.19.4.
-
-function GeneratorObjectNext(value) {
-  if (!IS_GENERATOR(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        '[Generator].prototype.next', this);
-  }
-
-  var continuation = %GeneratorGetContinuation(this);
-  if (continuation > 0) {
-    // Generator is suspended.
-    DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
-    return %_GeneratorNext(this, value);
-  } else if (continuation == 0) {
-    // Generator is already closed.
-    return %_CreateIterResultObject(UNDEFINED, true);
-  } else {
-    // Generator is running.
-    throw MakeTypeError(kGeneratorRunning);
-  }
-}
-
-
-function GeneratorObjectReturn(value) {
-  if (!IS_GENERATOR(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        '[Generator].prototype.return', this);
-  }
-
-  var continuation = %GeneratorGetContinuation(this);
-  if (continuation > 0) {
-    // Generator is suspended.
-    DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
-    return %_GeneratorReturn(this, value);
-  } else if (continuation == 0) {
-    // Generator is already closed.
-    return %_CreateIterResultObject(value, true);
-  } else {
-    // Generator is running.
-    throw MakeTypeError(kGeneratorRunning);
-  }
-}
-
-
-function GeneratorObjectThrow(exn) {
-  if (!IS_GENERATOR(this)) {
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        '[Generator].prototype.throw', this);
-  }
-
-  var continuation = %GeneratorGetContinuation(this);
-  if (continuation > 0) {
-    // Generator is suspended.
-    DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
-    return %_GeneratorThrow(this, exn);
-  } else if (continuation == 0) {
-    // Generator is already closed.
-    throw exn;
-  } else {
-    // Generator is running.
-    throw MakeTypeError(kGeneratorRunning);
-  }
-}
-
-// ----------------------------------------------------------------------------
-
-// None of the three resume operations (Runtime_GeneratorNext,
-// Runtime_GeneratorReturn, Runtime_GeneratorThrow) is supported by
-// Crankshaft or TurboFan.  Disable optimization of wrappers here.
-%NeverOptimizeFunction(GeneratorObjectNext);
-%NeverOptimizeFunction(GeneratorObjectReturn);
-%NeverOptimizeFunction(GeneratorObjectThrow);
-
-// Set up non-enumerable functions on the generator prototype object.
-var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
-utils.InstallFunctions(GeneratorObjectPrototype,
-                       DONT_ENUM,
-                      ["next", GeneratorObjectNext,
-                       "return", GeneratorObjectReturn,
-                       "throw", GeneratorObjectThrow]);
-
-%AddNamedProperty(GeneratorObjectPrototype, "constructor",
-    GeneratorFunctionPrototype, DONT_ENUM | READ_ONLY);
-%AddNamedProperty(GeneratorObjectPrototype,
-    toStringTagSymbol, "Generator", DONT_ENUM | READ_ONLY);
-%InternalSetPrototype(GeneratorFunctionPrototype, GlobalFunction.prototype);
-%AddNamedProperty(GeneratorFunctionPrototype,
-    toStringTagSymbol, "GeneratorFunction", DONT_ENUM | READ_ONLY);
-%AddNamedProperty(GeneratorFunctionPrototype, "constructor",
-    GeneratorFunction, DONT_ENUM | READ_ONLY);
-%InternalSetPrototype(GeneratorFunction, GlobalFunction);
-
-})
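
The deleted wrappers encoded the generator state machine in JS — continuation > 0 suspended, == 0 closed, otherwise running — before resumption moved into builtins. The observable contract is unchanged; a sketch of the three states:

    function* gen() { yield 1; }
    var g = gen();
    console.log(g.next());  // { value: 1, done: false }        (suspended)
    console.log(g.next());  // { value: undefined, done: true } (now closed)
    console.log(g.next());  // { value: undefined, done: true } (stays closed)

    // Resuming a generator that is already running throws kGeneratorRunning:
    var h = (function*() { yield h.next(); })();
    try {
      h.next();
    } catch (e) {
      console.log(e instanceof TypeError);  // true: "already running"
    }
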
diff --git a/src/js/harmony-async-await.js b/src/js/harmony-async-await.js
new file mode 100644
index 0000000..c6705ef
--- /dev/null
+++ b/src/js/harmony-async-await.js
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils, extrasUtils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var AsyncFunctionNext;
+var AsyncFunctionThrow;
+var PromiseReject;
+var PromiseResolve;
+var PromiseThen;
+
+utils.Import(function(from) {
+  AsyncFunctionNext = from.AsyncFunctionNext;
+  AsyncFunctionThrow = from.AsyncFunctionThrow;
+  PromiseReject = from.PromiseCreateRejected;
+  PromiseResolve = from.PromiseCreateResolved;
+  PromiseThen = from.PromiseThen;
+});
+
+// -------------------------------------------------------------------
+
+function AsyncFunctionAwait(generator, value) {
+  return %_Call(
+      PromiseThen, PromiseResolve(value),
+      function(sentValue) {
+        return %_Call(AsyncFunctionNext, generator, sentValue);
+      },
+      function(sentError) {
+        return %_Call(AsyncFunctionThrow, generator, sentError);
+      });
+}
+
+%InstallToContext([ "async_function_await", AsyncFunctionAwait ]);
+
+})
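
AsyncFunctionAwait is the runtime half of the await desugaring: wrap the operand in a resolved promise, then resume the suspended generator with the settled value via the AsyncFunctionNext/AsyncFunctionThrow behaviors. Roughly — a sketch only, eliding the %_Call plumbing:

    // async function f() { var x = await expr; ... }
    // behaves approximately like:
    function awaitStep(generator, expr) {
      return Promise.resolve(expr).then(
          function(sentValue) { return generator.next(sentValue); },
          function(sentError) { return generator.throw(sentError); });
    }
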
diff --git a/src/js/harmony-atomics.js b/src/js/harmony-atomics.js
index 9f80227..8372903 100644
--- a/src/js/harmony-atomics.js
+++ b/src/js/harmony-atomics.js
@@ -62,19 +62,6 @@
   return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
 }
 
-function AtomicsLoadJS(sta, index) {
-  CheckSharedIntegerTypedArray(sta);
-  index = ValidateIndex(index, %_TypedArrayGetLength(sta));
-  return %_AtomicsLoad(sta, index);
-}
-
-function AtomicsStoreJS(sta, index, value) {
-  CheckSharedIntegerTypedArray(sta);
-  index = ValidateIndex(index, %_TypedArrayGetLength(sta));
-  value = TO_NUMBER(value);
-  return %_AtomicsStore(sta, index, value);
-}
-
 function AtomicsAddJS(ia, index, value) {
   CheckSharedIntegerTypedArray(ia);
   index = ValidateIndex(index, %_TypedArrayGetLength(ia));
@@ -161,13 +148,9 @@
 
 // -------------------------------------------------------------------
 
-function AtomicsConstructor() {}
+var Atomics = global.Atomics;
 
-var Atomics = new AtomicsConstructor();
-
-%InternalSetPrototype(Atomics, GlobalObject.prototype);
-%AddNamedProperty(global, "Atomics", Atomics, DONT_ENUM);
-%FunctionSetInstanceClassName(AtomicsConstructor, 'Atomics');
+// The Atomics global is defined by the bootstrapper.
 
 %AddNamedProperty(Atomics, toStringTagSymbol, "Atomics", READ_ONLY | DONT_ENUM);
 
@@ -179,9 +162,9 @@
 ]);
 
 utils.InstallFunctions(Atomics, DONT_ENUM, [
+  // TODO(binji): remove the rest of the (non futex) Atomics functions as they
+  // become builtins.
   "compareExchange", AtomicsCompareExchangeJS,
-  "load", AtomicsLoadJS,
-  "store", AtomicsStoreJS,
   "add", AtomicsAddJS,
   "sub", AtomicsSubJS,
   "and", AtomicsAndJS,
diff --git a/src/js/harmony-object-observe.js b/src/js/harmony-object-observe.js
deleted file mode 100644
index 95dd298..0000000
--- a/src/js/harmony-object-observe.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var ObserveArrayMethods = utils.ImportNow("ObserveArrayMethods");
-var ObserveObjectMethods = utils.ImportNow("ObserveObjectMethods");;
-
-utils.InstallFunctions(global.Object, DONT_ENUM, ObserveObjectMethods);
-utils.InstallFunctions(global.Array, DONT_ENUM, ObserveArrayMethods);
-
-})
diff --git a/src/js/harmony-string-padding.js b/src/js/harmony-string-padding.js
index a6c6c47..dc59823 100644
--- a/src/js/harmony-string-padding.js
+++ b/src/js/harmony-string-padding.js
@@ -30,7 +30,8 @@
   } else {
     fillString = TO_STRING(fillString);
     if (fillString === "") {
-      fillString = " ";
+      // If filler is the empty String, return S.
+      return "";
     }
   }
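
The helper above builds the filler for the padding methods; returning the empty string when fillString is empty implements the spec step "If filler is the empty String, return S". Behind --harmony_string_padding the observable change is:

    // Previously an empty filler was silently replaced with a space.
    console.log('abc'.padStart(10, ''));  // 'abc'  (no longer '       abc')
    console.log('abc'.padEnd(10, ''));    // 'abc'
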
 
diff --git a/src/js/i18n.js b/src/js/i18n.js
index 845289a..7c9535b 100644
--- a/src/js/i18n.js
+++ b/src/js/i18n.js
@@ -20,6 +20,7 @@
 var ArrayIndexOf;
 var ArrayJoin;
 var ArrayPush;
+var FLAG_intl_extra;
 var GlobalBoolean = global.Boolean;
 var GlobalDate = global.Date;
 var GlobalNumber = global.Number;
@@ -27,7 +28,7 @@
 var GlobalString = global.String;
 var InstallFunctions = utils.InstallFunctions;
 var InstallGetter = utils.InstallGetter;
-var InternalPackedArray = utils.InternalPackedArray;
+var InternalArray = utils.InternalArray;
 var InternalRegExpMatch;
 var InternalRegExpReplace;
 var IsFinite;
@@ -35,8 +36,6 @@
 var MakeError;
 var MakeRangeError;
 var MakeTypeError;
-var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
-var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
 var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
 var OverrideFunction = utils.OverrideFunction;
 var patternSymbol = utils.ImportNow("intl_pattern_symbol");
@@ -66,6 +65,10 @@
   StringSubstring = from.StringSubstring;
 });
 
+utils.ImportFromExperimental(function(from) {
+  FLAG_intl_extra = from.FLAG_intl_extra;
+});
+
 // Utilities for definitions
 
 function InstallFunction(object, name, func) {
@@ -84,11 +87,11 @@
 /**
  * Adds bound method to the prototype of the given object.
  */
-function AddBoundMethod(obj, methodName, implementation, length) {
+function AddBoundMethod(obj, methodName, implementation, length, type) {
   %CheckIsBootstrapping();
   var internalName = %CreatePrivateSymbol(methodName);
   var getter = function() {
-    if (!%IsInitializedIntlObject(this)) {
+    if (!%IsInitializedIntlObjectOfType(this, type)) {
       throw MakeTypeError(kMethodCalledOnWrongObject, methodName);
     }
     if (IS_UNDEFINED(this[internalName])) {
@@ -144,6 +147,13 @@
  */
 var DEFAULT_ICU_LOCALE = UNDEFINED;
 
+function GetDefaultICULocaleJS() {
+  if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
+    DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
+  }
+  return DEFAULT_ICU_LOCALE;
+}
+
 /**
  * Unicode extension regular expression.
  */
@@ -307,7 +317,7 @@
  * Locales appear in the same order in the returned list as in the input list.
  */
 function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
-  var matchedLocales = [];
+  var matchedLocales = new InternalArray();
   for (var i = 0; i < requestedLocales.length; ++i) {
     // Remove -u- extension.
     var locale = InternalRegExpReplace(
@@ -448,11 +458,7 @@
   }
 
   // Didn't find a match, return default.
-  if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
-    DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
-  }
-
-  return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
+  return {'locale': GetDefaultICULocaleJS(), 'extension': '', 'position': -1};
 }
 
 
@@ -567,21 +573,23 @@
 
 
 /**
- * Converts all OwnProperties into
+ * Given an array-like, outputs an Array with the numbered
+ * properties copied over and defined
  * configurable: false, writable: false, enumerable: true.
  */
-function freezeArray(array) {
-  var l = array.length;
+function freezeArray(input) {
+  var array = [];
+  var l = input.length;
   for (var i = 0; i < l; i++) {
-    if (i in array) {
-      ObjectDefineProperty(array, i, {value: array[i],
-                                      configurable: false,
-                                      writable: false,
-                                      enumerable: true});
+    if (i in input) {
+      %object_define_property(array, i, {value: input[i],
+                                         configurable: false,
+                                         writable: false,
+                                         enumerable: true});
     }
   }
 
-  ObjectDefineProperty(array, 'length', {value: l, writable: false});
+  %object_define_property(array, 'length', {value: l, writable: false});
   return array;
 }
 
@@ -643,8 +651,8 @@
  * Configurable is false by default.
  */
 function defineWEProperty(object, property, value) {
-  ObjectDefineProperty(object, property,
-                       {value: value, writable: true, enumerable: true});
+  %object_define_property(object, property,
+                          {value: value, writable: true, enumerable: true});
 }
 
 
@@ -663,10 +671,10 @@
  * Defines a property and sets writable, enumerable and configurable to true.
  */
 function defineWECProperty(object, property, value) {
-  ObjectDefineProperty(object, property, {value: value,
-                                          writable: true,
-                                          enumerable: true,
-                                          configurable: true});
+  %object_define_property(object, property, {value: value,
+                                             writable: true,
+                                             enumerable: true,
+                                             configurable: true});
 }
 
 
@@ -722,21 +730,24 @@
  */
 function canonicalizeLanguageTag(localeID) {
   // null is typeof 'object' so we have to do extra check.
-  if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
+  if ((!IS_STRING(localeID) && !IS_RECEIVER(localeID)) ||
       IS_NULL(localeID)) {
     throw MakeTypeError(kLanguageID);
   }
 
+  // Optimize for the most common case; a language code alone in
+  // the canonical form/lowercase (e.g. "en", "fil").
+  if (IS_STRING(localeID) &&
+      !IS_NULL(InternalRegExpMatch(/^[a-z]{2,3}$/, localeID))) {
+    return localeID;
+  }
+
   var localeString = GlobalString(localeID);
 
   if (isValidLanguageTag(localeString) === false) {
     throw MakeRangeError(kInvalidLanguageTag, localeString);
   }
 
-  // This call will strip -kn but not -kn-true extensions.
-  // ICU bug filled - http://bugs.icu-project.org/trac/ticket/9265.
-  // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
-  // upgrade to ICU 4.9.
   var tag = %CanonicalizeLanguageTag(localeString);
   if (tag === 'invalid-tag') {
     throw MakeRangeError(kInvalidLanguageTag, localeString);
@@ -751,11 +762,8 @@
  * Throws on locales that are not well formed BCP47 tags.
  */
 function initializeLocaleList(locales) {
-  var seen = [];
-  if (IS_UNDEFINED(locales)) {
-    // Constructor is called without arguments.
-    seen = [];
-  } else {
+  var seen = new InternalArray();
+  if (!IS_UNDEFINED(locales)) {
     // We allow single string localeID.
     if (typeof locales === 'string') {
       %_Call(ArrayPush, seen, canonicalizeLanguageTag(locales));
@@ -810,8 +818,8 @@
   // Skip language since it can match variant regex, so we start from 1.
   // We are matching i-klingon here, but that's ok, since i-klingon-klingon
   // is not valid and would fail LANGUAGE_TAG_RE test.
-  var variants = [];
-  var extensions = [];
+  var variants = new InternalArray();
+  var extensions = new InternalArray();
   var parts = %_Call(StringSplit, locale, '-');
   for (var i = 1; i < parts.length; i++) {
     var value = parts[i];
@@ -965,8 +973,8 @@
   // We define all properties C++ code may produce, to prevent security
   // problems. If malicious user decides to redefine Object.prototype.locale
   // we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
-  // ObjectDefineProperties will either succeed defining or throw an error.
-  var resolved = ObjectDefineProperties({}, {
+  // %object_define_properties will either succeed defining or throw an error.
+  var resolved = %object_define_properties({}, {
     caseFirst: {writable: true},
     collation: {value: internalOptions.collation, writable: true},
     ignorePunctuation: {writable: true},
@@ -985,7 +993,9 @@
   // Writable, configurable and enumerable are set to false by default.
   %MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
   collator[resolvedSymbol] = resolved;
-  ObjectDefineProperty(collator, 'resolved', resolvedAccessor);
+  if (FLAG_intl_extra) {
+    %object_define_property(collator, 'resolved', resolvedAccessor);
+  }
 
   return collator;
 }
@@ -1072,7 +1082,7 @@
 };
 
 
-AddBoundMethod(Intl.Collator, 'compare', compare, 2);
+AddBoundMethod(Intl.Collator, 'compare', compare, 2, 'collator');
 
 /**
  * Verifies that the input is a well-formed ISO 4217 currency code.
@@ -1198,7 +1208,7 @@
                              getOption, internalOptions);
 
   var requestedLocale = locale.locale + extension;
-  var resolved = ObjectDefineProperties({}, {
+  var resolved = %object_define_properties({}, {
     currency: {writable: true},
     currencyDisplay: {writable: true},
     locale: {writable: true},
@@ -1206,7 +1216,6 @@
     minimumFractionDigits: {writable: true},
     minimumIntegerDigits: {writable: true},
     numberingSystem: {writable: true},
-    pattern: patternAccessor,
     requestedLocale: {value: requestedLocale, writable: true},
     style: {value: internalOptions.style, writable: true},
     useGrouping: {writable: true}
@@ -1222,13 +1231,16 @@
                                       resolved);
 
   if (internalOptions.style === 'currency') {
-    ObjectDefineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
-                                                       writable: true});
+    %object_define_property(resolved, 'currencyDisplay',
+        {value: currencyDisplay, writable: true});
   }
 
   %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
   numberFormat[resolvedSymbol] = resolved;
-  ObjectDefineProperty(numberFormat, 'resolved', resolvedAccessor);
+  if (FLAG_intl_extra) {
+    %object_define_property(resolved, 'pattern', patternAccessor);
+    %object_define_property(numberFormat, 'resolved', resolvedAccessor);
+  }
 
   return numberFormat;
 }
@@ -1334,14 +1346,12 @@
 /**
  * Returns a Number that represents string value that was passed in.
  */
-function parseNumber(formatter, value) {
+function IntlParseNumber(formatter, value) {
   return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter),
                               GlobalString(value));
 }
 
-
-AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
-AddBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
+AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1, 'numberformat');
 
 /**
  * Returns a string that matches LDML representation of the options object.
@@ -1508,33 +1518,33 @@
   }
 
   if (needsDefault && (defaults === 'date' || defaults === 'all')) {
-    ObjectDefineProperty(options, 'year', {value: 'numeric',
-                                           writable: true,
-                                           enumerable: true,
-                                           configurable: true});
-    ObjectDefineProperty(options, 'month', {value: 'numeric',
-                                            writable: true,
-                                            enumerable: true,
-                                            configurable: true});
-    ObjectDefineProperty(options, 'day', {value: 'numeric',
-                                          writable: true,
-                                          enumerable: true,
-                                          configurable: true});
+    %object_define_property(options, 'year', {value: 'numeric',
+                                              writable: true,
+                                              enumerable: true,
+                                              configurable: true});
+    %object_define_property(options, 'month', {value: 'numeric',
+                                               writable: true,
+                                               enumerable: true,
+                                               configurable: true});
+    %object_define_property(options, 'day', {value: 'numeric',
+                                             writable: true,
+                                             enumerable: true,
+                                             configurable: true});
   }
 
   if (needsDefault && (defaults === 'time' || defaults === 'all')) {
-    ObjectDefineProperty(options, 'hour', {value: 'numeric',
-                                           writable: true,
-                                           enumerable: true,
-                                           configurable: true});
-    ObjectDefineProperty(options, 'minute', {value: 'numeric',
-                                             writable: true,
-                                             enumerable: true,
-                                             configurable: true});
-    ObjectDefineProperty(options, 'second', {value: 'numeric',
-                                             writable: true,
-                                             enumerable: true,
-                                             configurable: true});
+    %object_define_property(options, 'hour', {value: 'numeric',
+                                              writable: true,
+                                              enumerable: true,
+                                              configurable: true});
+    %object_define_property(options, 'minute', {value: 'numeric',
+                                                writable: true,
+                                                enumerable: true,
+                                                configurable: true});
+    %object_define_property(options, 'second', {value: 'numeric',
+                                                writable: true,
+                                                enumerable: true,
+                                                configurable: true});
   }
 
   return options;
@@ -1592,7 +1602,7 @@
                              getOption, internalOptions);
 
   var requestedLocale = locale.locale + extension;
-  var resolved = ObjectDefineProperties({}, {
+  var resolved = %object_define_properties({}, {
     calendar: {writable: true},
     day: {writable: true},
     era: {writable: true},
@@ -1603,7 +1613,6 @@
     month: {writable: true},
     numberingSystem: {writable: true},
     [patternSymbol]: {writable: true},
-    pattern: patternAccessor,
     requestedLocale: {value: requestedLocale, writable: true},
     second: {writable: true},
     timeZone: {writable: true},
@@ -1622,7 +1631,10 @@
 
   %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
   dateFormat[resolvedSymbol] = resolved;
-  ObjectDefineProperty(dateFormat, 'resolved', resolvedAccessor);
+  if (FLAG_intl_extra) {
+    %object_define_property(resolved, 'pattern', patternAccessor);
+    %object_define_property(dateFormat, 'resolved', resolvedAccessor);
+  }
 
   return dateFormat;
 }
@@ -1756,15 +1768,14 @@
  * DateTimeFormat.
  * Returns undefined if date string cannot be parsed.
  */
-function parseDate(formatter, value) {
+function IntlParseDate(formatter, value) {
   return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter),
                             GlobalString(value));
 }
 
 
 // 0 because date is optional argument.
-AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
-AddBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
+AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0, 'dateformat');
 
 
 /**
@@ -1826,7 +1837,7 @@
     'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
 
   var locale = resolveLocale('breakiterator', locales, options);
-  var resolved = ObjectDefineProperties({}, {
+  var resolved = %object_define_properties({}, {
     requestedLocale: {value: locale.locale, writable: true},
     type: {value: internalOptions.type, writable: true},
     locale: {writable: true}
@@ -1839,7 +1850,9 @@
   %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
                                      internalIterator);
   iterator[resolvedSymbol] = resolved;
-  ObjectDefineProperty(iterator, 'resolved', resolvedAccessor);
+  if (FLAG_intl_extra) {
+    %object_define_property(iterator, 'resolved', resolvedAccessor);
+  }
 
   return iterator;
 }
@@ -1950,11 +1963,13 @@
 }
 
 
-AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
-AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
-AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
-AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
-AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1,
+               'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0, 'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0, 'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0, 'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0,
+               'breakiterator');
 
 // Save references to Intl objects and methods we use, for added security.
 var savedObjects = {
@@ -1992,6 +2007,37 @@
   return new savedObjects[service](locales, useOptions);
 }
 
+function LocaleConvertCase(s, locales, isToUpper) {
+  // ECMA 402 section 13.1.2 steps 1 through 12.
+  var language;
+  // Optimize for the most common two cases. initializeLocaleList() can handle
+  // them as well, but it's rather slow accounting for over 60% of
+  // toLocale{U,L}Case() and about 40% of toLocale{U,L}Case("<locale>").
+  if (IS_UNDEFINED(locales)) {
+    language = GetDefaultICULocaleJS();
+  } else if (IS_STRING(locales)) {
+    language = canonicalizeLanguageTag(locales);
+  } else {
+    var locales = initializeLocaleList(locales);
+    language = locales.length > 0 ? locales[0] : GetDefaultICULocaleJS();
+  }
+
+  // StringSplit is slower than this.
+  var pos = %_Call(StringIndexOf, language, '-');
+  if (pos != -1) {
+    language = %_Call(StringSubstring, language, 0, pos);
+  }
+
+  var CUSTOM_CASE_LANGUAGES = ['az', 'el', 'lt', 'tr'];
+  var langIndex = %_Call(ArrayIndexOf, CUSTOM_CASE_LANGUAGES, language);
+  if (langIndex == -1) {
+    // language-independent case conversion.
+    return isToUpper ? %StringToUpperCaseI18N(s) : %StringToLowerCaseI18N(s);
+  }
+  return %StringLocaleConvertCase(s, isToUpper,
+                                  CUSTOM_CASE_LANGUAGES[langIndex]);
+}
+
 /**
  * Compares this and that, and returns less than 0, 0 or greater than 0 value.
  * Overrides the built-in method.
@@ -2044,6 +2090,56 @@
   }
 );
 
+function ToLowerCaseI18N() {
+  if (!IS_UNDEFINED(new.target)) {
+    throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+  }
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
+  var s = TO_STRING(this);
+  return %StringToLowerCaseI18N(s);
+}
+
+function ToUpperCaseI18N() {
+  if (!IS_UNDEFINED(new.target)) {
+    throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+  }
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
+  var s = TO_STRING(this);
+  return %StringToUpperCaseI18N(s);
+}
+
+function ToLocaleLowerCaseI18N(locales) {
+  if (!IS_UNDEFINED(new.target)) {
+    throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+  }
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
+  return LocaleConvertCase(TO_STRING(this), locales, false);
+}
+
+%FunctionSetLength(ToLocaleLowerCaseI18N, 0);
+
+function ToLocaleUpperCaseI18N(locales) {
+  if (!IS_UNDEFINED(new.target)) {
+    throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+  }
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
+  return LocaleConvertCase(TO_STRING(this), locales, true);
+}
+
+%FunctionSetLength(ToLocaleUpperCaseI18N, 0);
+
+%FunctionRemovePrototype(ToLowerCaseI18N);
+%FunctionRemovePrototype(ToUpperCaseI18N);
+%FunctionRemovePrototype(ToLocaleLowerCaseI18N);
+%FunctionRemovePrototype(ToLocaleUpperCaseI18N);
+
+utils.Export(function(to) {
+  to.ToLowerCaseI18N = ToLowerCaseI18N;
+  to.ToUpperCaseI18N = ToUpperCaseI18N;
+  to.ToLocaleLowerCaseI18N = ToLocaleLowerCaseI18N;
+  to.ToLocaleUpperCaseI18N = ToLocaleUpperCaseI18N;
+});
+
 
 /**
  * Formats a Number object (this) using locale and options values.
@@ -2138,4 +2234,10 @@
   }
 );
 
+utils.Export(function(to) {
+  to.AddBoundMethod = AddBoundMethod;
+  to.IntlParseDate = IntlParseDate;
+  to.IntlParseNumber = IntlParseNumber;
+});
+
 })
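LocaleConvertCase above only pays for a full initializeLocaleList() when it is
handed a locale list; single tags and the default locale take the fast path,
and only the four languages with locale-specific case rules (az, el, lt, tr)
reach %StringLocaleConvertCase. A rough illustration of the resulting behavior,
assuming an ICU-enabled build:

    // Turkish has locale-specific mappings for the letter I:
    'i'.toLocaleUpperCase('tr');          // "İ" (U+0130, dotted capital I)
    'i'.toLocaleUpperCase('en');          // "I"
    // Only the language subtag is consulted, so 'tr-Latn-TR' behaves like 'tr'.
    'i'.toLocaleUpperCase('tr-Latn-TR');  // "İ"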
diff --git a/src/js/icu-case-mapping.js b/src/js/icu-case-mapping.js
new file mode 100644
index 0000000..9806249
--- /dev/null
+++ b/src/js/icu-case-mapping.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalString = global.String;
+var OverrideFunction = utils.OverrideFunction;
+var ToLowerCaseI18N = utils.ImportNow("ToLowerCaseI18N");
+var ToUpperCaseI18N = utils.ImportNow("ToUpperCaseI18N");
+var ToLocaleLowerCaseI18N = utils.ImportNow("ToLocaleLowerCaseI18N");
+var ToLocaleUpperCaseI18N = utils.ImportNow("ToLocaleUpperCaseI18N");
+
+OverrideFunction(GlobalString.prototype, 'toLowerCase', ToLowerCaseI18N, true);
+OverrideFunction(GlobalString.prototype, 'toUpperCase', ToUpperCaseI18N, true);
+OverrideFunction(GlobalString.prototype, 'toLocaleLowerCase',
+                 ToLocaleLowerCaseI18N, true);
+OverrideFunction(GlobalString.prototype, 'toLocaleUpperCase',
+                 ToLocaleUpperCaseI18N, true);
+
+})
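This new natives file swaps the ICU-backed implementations in for the four
String.prototype case-mapping methods at bootstrap; the trailing true tells
OverrideFunction not to repeat %FunctionRemovePrototype, which i18n.js already
applied above. One observable effect, assuming the corresponding flag is on,
is context-sensitive Unicode case conversion even without a locale argument:

    'ΟΔΟΣ'.toLowerCase();    // "οδος": word-final Σ lowers to ς (U+03C2)
    'straße'.toUpperCase();  // "STRASSE": full (length-changing) case mapping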
diff --git a/src/js/intl-extra.js b/src/js/intl-extra.js
new file mode 100644
index 0000000..a4d2256
--- /dev/null
+++ b/src/js/intl-extra.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalIntl = global.Intl;
+
+var AddBoundMethod = utils.ImportNow("AddBoundMethod");
+var IntlParseDate = utils.ImportNow("IntlParseDate");
+var IntlParseNumber = utils.ImportNow("IntlParseNumber");
+
+AddBoundMethod(GlobalIntl.DateTimeFormat, 'v8Parse', IntlParseDate, 1,
+               'dateformat');
+AddBoundMethod(GlobalIntl.NumberFormat, 'v8Parse', IntlParseNumber, 1,
+               'numberformat');
+
+})
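intl-extra.js keeps the non-standard v8Parse methods behind FLAG_intl_extra,
reusing the AddBoundMethod helper now exported from i18n.js; the trailing
string argument ('dateformat', 'numberformat', 'breakiterator' above) appears
to name the internal Intl type the receiver is validated against. A simplified
sketch of the bound-method pattern itself (names and caching strategy are
illustrative, not the real internals):

    function AddBoundMethodSketch(ctor, name, implementation, length) {
      var cache = new WeakMap();  // one lazily created bound function per receiver
      Object.defineProperty(ctor.prototype, name, {
        get: function() {
          var bound = cache.get(this);
          if (bound === undefined) {
            bound = implementation.bind(this);
            Object.defineProperty(bound, 'length', { value: length });
            cache.set(this, bound);
          }
          return bound;
        },
        configurable: true
      });
    }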
diff --git a/src/js/macros.py b/src/js/macros.py
index a4c7f53..3cc2d6c 100644
--- a/src/js/macros.py
+++ b/src/js/macros.py
@@ -113,6 +113,7 @@
 macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(arg));
 macro TO_INT32(arg) = ((arg) | 0);
 macro TO_UINT32(arg) = ((arg) >>> 0);
+macro INVERT_NEG_ZERO(arg) = ((arg) + 0);
 macro TO_LENGTH(arg) = (%_ToLength(arg));
 macro TO_STRING(arg) = (%_ToString(arg));
 macro TO_NUMBER(arg) = (%_ToNumber(arg));
@@ -174,16 +175,6 @@
 # 1-based so index of 1 returns the first capture
 macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
 
-# PropertyDescriptor return value indices - must match
-# PropertyDescriptorIndices in runtime-object.cc.
-define IS_ACCESSOR_INDEX = 0;
-define VALUE_INDEX = 1;
-define GETTER_INDEX = 2;
-define SETTER_INDEX = 3;
-define WRITABLE_INDEX = 4;
-define ENUMERABLE_INDEX = 5;
-define CONFIGURABLE_INDEX = 6;
-
 # For messages.js
 # Matches Script::Type from objects.h
 define TYPE_NATIVE = 0;
@@ -238,7 +229,6 @@
 
 # Check whether debug is active.
 define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
-macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (%_DebugIsActive() != 0) %DebugPrepareStepInIfStepping(function);
 
 # SharedFlag equivalents
 define kNotShared = false;
@@ -251,7 +241,6 @@
 define kMarkDequeOverflow = 3;
 define kStoreBufferOverflow = 4;
 define kSlotsBufferOverflow = 5;
-define kObjectObserve = 6;
 define kForcedGC = 7;
 define kSloppyMode = 8;
 define kStrictMode = 9;
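The new INVERT_NEG_ZERO macro leans on IEEE-754 addition, under which -0 + 0
evaluates to +0 while every other value is unchanged, so a negative zero is
normalized without a branch:

    Object.is(-0 + 0, 0);     // true: (-0) + 0 yields +0
    Object.is(1.5 + 0, 1.5);  // true: all other numbers pass through unchanged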
diff --git a/src/js/messages.js b/src/js/messages.js
index f8cb967..b5c4b56 100644
--- a/src/js/messages.js
+++ b/src/js/messages.js
@@ -23,6 +23,10 @@
     utils.ImportNow("call_site_position_symbol");
 var callSiteStrictSymbol =
     utils.ImportNow("call_site_strict_symbol");
+var callSiteWasmObjectSymbol =
+    utils.ImportNow("call_site_wasm_obj_symbol");
+var callSiteWasmFunctionIndexSymbol =
+    utils.ImportNow("call_site_wasm_func_index_symbol");
 var Float32x4ToString;
 var formattedStackTraceSymbol =
     utils.ImportNow("formatted_stack_trace_symbol");
@@ -32,12 +36,10 @@
 var Int8x16ToString;
 var InternalArray = utils.InternalArray;
 var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
-var ObjectDefineProperty;
 var ObjectHasOwnProperty;
 var ObjectToString = utils.ImportNow("object_to_string");
 var Script = utils.ImportNow("Script");
 var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
-var StringCharAt;
 var StringIndexOf;
 var StringSubstring;
 var SymbolToString;
@@ -55,9 +57,7 @@
   Int16x8ToString = from.Int16x8ToString;
   Int32x4ToString = from.Int32x4ToString;
   Int8x16ToString = from.Int8x16ToString;
-  ObjectDefineProperty = from.ObjectDefineProperty;
   ObjectHasOwnProperty = from.ObjectHasOwnProperty;
-  StringCharAt = from.StringCharAt;
   StringIndexOf = from.StringIndexOf;
   StringSubstring = from.StringSubstring;
   SymbolToString = from.SymbolToString;
@@ -255,6 +255,7 @@
   return -1;
 }
 
+
 /**
  * Get information on a specific source position.
  * @param {number} position The source position
@@ -272,7 +273,7 @@
   var line_ends = this.line_ends;
   var start = line == 0 ? 0 : line_ends[line - 1] + 1;
   var end = line_ends[line];
-  if (end > 0 && %_Call(StringCharAt, this.source, end - 1) == '\r') {
+  if (end > 0 && %_StringCharAt(this.source, end - 1) === '\r') {
     end--;
   }
   var column = position - start;
@@ -556,7 +557,9 @@
 // Error implementation
 
 function CallSite(receiver, fun, pos, strict_mode) {
-  if (!IS_FUNCTION(fun)) {
+  // For wasm frames, receiver is the wasm object and fun is the function index
+  // instead of an actual function.
+  if (!IS_FUNCTION(fun) && !IS_NUMBER(fun)) {
     throw MakeTypeError(kCallSiteExpectsFunction, typeof fun);
   }
 
@@ -564,14 +567,19 @@
     return new CallSite(receiver, fun, pos, strict_mode);
   }
 
-  SET_PRIVATE(this, callSiteReceiverSymbol, receiver);
-  SET_PRIVATE(this, callSiteFunctionSymbol, fun);
+  if (IS_FUNCTION(fun)) {
+    SET_PRIVATE(this, callSiteReceiverSymbol, receiver);
+    SET_PRIVATE(this, callSiteFunctionSymbol, fun);
+  } else {
+    SET_PRIVATE(this, callSiteWasmObjectSymbol, receiver);
+    SET_PRIVATE(this, callSiteWasmFunctionIndexSymbol, TO_UINT32(fun));
+  }
   SET_PRIVATE(this, callSitePositionSymbol, TO_INT32(pos));
   SET_PRIVATE(this, callSiteStrictSymbol, TO_BOOLEAN(strict_mode));
 }
 
 function CheckCallSite(obj, name) {
-  if (!IS_RECEIVER(obj) || !HAS_PRIVATE(obj, callSiteFunctionSymbol)) {
+  if (!IS_RECEIVER(obj) || !HAS_PRIVATE(obj, callSitePositionSymbol)) {
     throw MakeTypeError(kCallSiteMethod, name);
   }
 }
@@ -622,6 +630,12 @@
 function CallSiteGetFunctionName() {
   // See if the function knows its own name
   CheckCallSite(this, "getFunctionName");
+  if (HAS_PRIVATE(this, callSiteWasmObjectSymbol)) {
+    var wasm = GET_PRIVATE(this, callSiteWasmObjectSymbol);
+    var func_index = GET_PRIVATE(this, callSiteWasmFunctionIndexSymbol);
+    if (IS_UNDEFINED(wasm)) return "<WASM>";
+    return %WasmGetFunctionName(wasm, func_index);
+  }
   return %CallSiteGetFunctionNameRT(this);
 }
 
@@ -638,6 +652,9 @@
 }
 
 function CallSiteGetLineNumber() {
+  if (HAS_PRIVATE(this, callSiteWasmObjectSymbol)) {
+    return GET_PRIVATE(this, callSiteWasmFunctionIndexSymbol);
+  }
   CheckCallSite(this, "getLineNumber");
   return %CallSiteGetLineNumberRT(this);
 }
@@ -658,6 +675,13 @@
 }
 
 function CallSiteToString() {
+  if (HAS_PRIVATE(this, callSiteWasmObjectSymbol)) {
+    var funName = this.getFunctionName();
+    var funcIndex = GET_PRIVATE(this, callSiteWasmFunctionIndexSymbol);
+    var pos = this.getPosition();
+    return funName + " (<WASM>:" + funcIndex + ":" + pos + ")";
+  }
+
   var fileName;
   var fileLocation = "";
   if (this.isNative()) {
@@ -795,14 +819,19 @@
 
 
 function GetStackFrames(raw_stack) {
+  var internal_raw_stack = new InternalArray();
+  %MoveArrayContents(raw_stack, internal_raw_stack);
   var frames = new InternalArray();
-  var sloppy_frames = raw_stack[0];
-  for (var i = 1; i < raw_stack.length; i += 4) {
-    var recv = raw_stack[i];
-    var fun = raw_stack[i + 1];
-    var code = raw_stack[i + 2];
-    var pc = raw_stack[i + 3];
-    var pos = %_IsSmi(code) ? code : %FunctionGetPositionForOffset(code, pc);
+  var sloppy_frames = internal_raw_stack[0];
+  for (var i = 1; i < internal_raw_stack.length; i += 4) {
+    var recv = internal_raw_stack[i];
+    var fun = internal_raw_stack[i + 1];
+    var code = internal_raw_stack[i + 2];
+    var pc = internal_raw_stack[i + 3];
+    // For traps in wasm, the bytecode offset is passed as (-1 - offset).
+    // Otherwise, look up the position from the pc.
+    var pos = IS_NUMBER(fun) && pc < 0 ? (-1 - pc) :
+      %FunctionGetPositionForOffset(code, pc);
     sloppy_frames--;
     frames.push(new CallSite(recv, fun, pos, (sloppy_frames < 0)));
   }
@@ -879,7 +908,7 @@
       if (IS_UNDEFINED(stack_trace)) {
         // Neither formatted nor structured stack trace available.
         // Look further up the prototype chain.
-        holder = %_GetPrototype(holder);
+        holder = %object_get_prototype_of(holder);
         continue;
       }
       formatted_stack_trace = FormatStackTrace(holder, stack_trace);
@@ -995,9 +1024,9 @@
 // Define actual captureStackTrace function after everything has been set up.
 captureStackTrace = function captureStackTrace(obj, cons_opt) {
   // Define accessors first, as this may fail and throw.
-  ObjectDefineProperty(obj, 'stack', { get: StackTraceGetter,
-                                       set: StackTraceSetter,
-                                       configurable: true });
+  %object_define_property(obj, 'stack', { get: StackTraceGetter,
+                                          set: StackTraceSetter,
+                                          configurable: true });
   %CollectStackTrace(obj, cons_opt ? cons_opt : captureStackTrace);
 };
 
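Taken together, the messages.js changes teach CallSite about wasm frames: a
frame is marked as wasm by passing a numeric function index instead of a
function, and %CollectStackTrace now hands back a flat array of a sloppy-frame
count followed by [receiver, fun, code, pc] quadruples. A hypothetical decoder
for a single frame, mirroring the loop in GetStackFrames:

    function decodeFrame(raw, i) {
      var recv = raw[i], fun = raw[i + 1], code = raw[i + 2], pc = raw[i + 3];
      var isWasm = typeof fun === 'number';  // wasm frames pass a function index
      // Wasm traps encode the bytecode offset as (-1 - offset); other frames
      // would resolve pc via %FunctionGetPositionForOffset(code, pc).
      var pos = isWasm && pc < 0 ? -1 - pc : pc;
      return { receiver: recv, fun: fun, isWasm: isWasm, position: pos };
    }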
diff --git a/src/js/object-observe.js b/src/js/object-observe.js
deleted file mode 100644
index 5e256bf..0000000
--- a/src/js/object-observe.js
+++ /dev/null
@@ -1,717 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GetHash;
-var GlobalArray = global.Array;
-var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
-var MakeTypeError;
-
-utils.Import(function(from) {
-  GetHash = from.GetHash;
-  MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-// Overview:
-//
-// This file contains all of the routing and accounting for Object.observe.
-// User code will interact with these mechanisms via the Object.observe APIs
-// and, as a side effect of mutating objects which are observed. The V8 runtime
-// (both C++ and JS) will interact with these mechanisms primarily by enqueuing
-// proper change records for objects which were mutated. The Object.observe
-// routing and accounting consists primarily of three participants:
-//
-// 1) ObjectInfo. This represents the observed state of a given object. It
-//    records what callbacks are observing the object, with what options, and
-//    what "change types" are in progress on the object (i.e. via
-//    notifier.performChange).
-//
-// 2) CallbackInfo. This represents a callback used for observation. It holds
-//    the records which must be delivered to the callback, as well as the global
-//    priority of the callback (which determines delivery order between
-//    callbacks).
-//
-// 3) observationState.pendingObservers. This is the set of observers which
-//    have change records which must be delivered. During "normal" delivery
-//    (i.e. not Object.deliverChangeRecords), this is the mechanism by which
-//    callbacks are invoked in the proper order until there are no more
-//    change records pending to a callback.
-//
-// Note that in order to reduce allocation and processing costs, the
-// implementation of (1) and (2) have "optimized" states which represent
-// common cases which can be handled more efficiently.
-
-var observationState;
-
-var notifierPrototype = {};
-
-// We have to wait until after bootstrapping to grab a reference to the
-// observationState object, since it's not possible to serialize that
-// reference into the snapshot.
-function GetObservationStateJS() {
-  if (IS_UNDEFINED(observationState)) {
-    observationState = %GetObservationState();
-  }
-
-  // TODO(adamk): Consider moving this code into heap.cc
-  if (IS_UNDEFINED(observationState.callbackInfoMap)) {
-    observationState.callbackInfoMap = %ObservationWeakMapCreate();
-    observationState.objectInfoMap = %ObservationWeakMapCreate();
-    observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
-    observationState.pendingObservers = null;
-    observationState.nextCallbackPriority = 0;
-    observationState.lastMicrotaskId = 0;
-  }
-
-  return observationState;
-}
-
-
-function GetPendingObservers() {
-  return GetObservationStateJS().pendingObservers;
-}
-
-
-function SetPendingObservers(pendingObservers) {
-  GetObservationStateJS().pendingObservers = pendingObservers;
-}
-
-
-function GetNextCallbackPriority() {
-  return GetObservationStateJS().nextCallbackPriority++;
-}
-
-
-function nullProtoObject() {
-  return { __proto__: null };
-}
-
-
-function TypeMapCreate() {
-  return nullProtoObject();
-}
-
-
-function TypeMapAddType(typeMap, type, ignoreDuplicate) {
-  typeMap[type] = ignoreDuplicate ? 1 : (typeMap[type] || 0) + 1;
-}
-
-
-function TypeMapRemoveType(typeMap, type) {
-  typeMap[type]--;
-}
-
-
-function TypeMapCreateFromList(typeList, length) {
-  var typeMap = TypeMapCreate();
-  for (var i = 0; i < length; i++) {
-    TypeMapAddType(typeMap, typeList[i], true);
-  }
-  return typeMap;
-}
-
-
-function TypeMapHasType(typeMap, type) {
-  return !!typeMap[type];
-}
-
-
-function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
-  if (!typeMap1 || !typeMap2)
-    return true;
-
-  for (var type in typeMap1) {
-    if (TypeMapHasType(typeMap1, type) && TypeMapHasType(typeMap2, type))
-      return false;
-  }
-
-  return true;
-}
-
-
-var defaultAcceptTypes = (function() {
-  var defaultTypes = [
-    'add',
-    'update',
-    'delete',
-    'setPrototype',
-    'reconfigure',
-    'preventExtensions'
-  ];
-  return TypeMapCreateFromList(defaultTypes, defaultTypes.length);
-})();
-
-
-// An Observer is a registration to observe an object by a callback with
-// a given set of accept types. If the set of accept types is the default
-// set for Object.observe, the observer is represented as a direct reference
-// to the callback. An observer never changes its accept types and thus never
-// needs to "normalize".
-function ObserverCreate(callback, acceptList) {
-  if (IS_UNDEFINED(acceptList))
-    return callback;
-  var observer = nullProtoObject();
-  observer.callback = callback;
-  observer.accept = acceptList;
-  return observer;
-}
-
-
-function ObserverGetCallback(observer) {
-  return IS_CALLABLE(observer) ? observer : observer.callback;
-}
-
-
-function ObserverGetAcceptTypes(observer) {
-  return IS_CALLABLE(observer) ? defaultAcceptTypes : observer.accept;
-}
-
-
-function ObserverIsActive(observer, objectInfo) {
-  return TypeMapIsDisjointFrom(ObjectInfoGetPerformingTypes(objectInfo),
-                               ObserverGetAcceptTypes(observer));
-}
-
-
-function ObjectInfoGetOrCreate(object) {
-  var objectInfo = ObjectInfoGet(object);
-  if (IS_UNDEFINED(objectInfo)) {
-    if (!IS_PROXY(object)) {
-      %SetIsObserved(object);
-    }
-    objectInfo = {
-      object: object,
-      changeObservers: null,
-      notifier: null,
-      performing: null,
-      performingCount: 0,
-    };
-    %WeakCollectionSet(GetObservationStateJS().objectInfoMap,
-                       object, objectInfo, GetHash(object));
-  }
-  return objectInfo;
-}
-
-
-function ObjectInfoGet(object) {
-  return %WeakCollectionGet(GetObservationStateJS().objectInfoMap, object,
-                            GetHash(object));
-}
-
-
-function ObjectInfoGetFromNotifier(notifier) {
-  return %WeakCollectionGet(GetObservationStateJS().notifierObjectInfoMap,
-                            notifier, GetHash(notifier));
-}
-
-
-function ObjectInfoGetNotifier(objectInfo) {
-  if (IS_NULL(objectInfo.notifier)) {
-    var notifier = { __proto__: notifierPrototype };
-    objectInfo.notifier = notifier;
-    %WeakCollectionSet(GetObservationStateJS().notifierObjectInfoMap,
-                       notifier, objectInfo, GetHash(notifier));
-  }
-
-  return objectInfo.notifier;
-}
-
-
-function ChangeObserversIsOptimized(changeObservers) {
-  return IS_CALLABLE(changeObservers) ||
-         IS_CALLABLE(changeObservers.callback);
-}
-
-
-// The set of observers on an object is called 'changeObservers'. The first
-// observer is referenced directly via objectInfo.changeObservers. When a second
-// is added, changeObservers "normalizes" to become a mapping of callback
-// priority -> observer and is then stored on objectInfo.changeObservers.
-function ObjectInfoNormalizeChangeObservers(objectInfo) {
-  if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
-    var observer = objectInfo.changeObservers;
-    var callback = ObserverGetCallback(observer);
-    var callbackInfo = CallbackInfoGet(callback);
-    var priority = CallbackInfoGetPriority(callbackInfo);
-    objectInfo.changeObservers = nullProtoObject();
-    objectInfo.changeObservers[priority] = observer;
-  }
-}
-
-
-function ObjectInfoAddObserver(objectInfo, callback, acceptList) {
-  var callbackInfo = CallbackInfoGetOrCreate(callback);
-  var observer = ObserverCreate(callback, acceptList);
-
-  if (!objectInfo.changeObservers) {
-    objectInfo.changeObservers = observer;
-    return;
-  }
-
-  ObjectInfoNormalizeChangeObservers(objectInfo);
-  var priority = CallbackInfoGetPriority(callbackInfo);
-  objectInfo.changeObservers[priority] = observer;
-}
-
-function ObjectInfoRemoveObserver(objectInfo, callback) {
-  if (!objectInfo.changeObservers)
-    return;
-
-  if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
-    if (callback === ObserverGetCallback(objectInfo.changeObservers))
-      objectInfo.changeObservers = null;
-    return;
-  }
-
-  var callbackInfo = CallbackInfoGet(callback);
-  var priority = CallbackInfoGetPriority(callbackInfo);
-  objectInfo.changeObservers[priority] = null;
-}
-
-function ObjectInfoHasActiveObservers(objectInfo) {
-  if (IS_UNDEFINED(objectInfo) || !objectInfo.changeObservers)
-    return false;
-
-  if (ChangeObserversIsOptimized(objectInfo.changeObservers))
-    return ObserverIsActive(objectInfo.changeObservers, objectInfo);
-
-  for (var priority in objectInfo.changeObservers) {
-    var observer = objectInfo.changeObservers[priority];
-    if (!IS_NULL(observer) && ObserverIsActive(observer, objectInfo))
-      return true;
-  }
-
-  return false;
-}
-
-
-function ObjectInfoAddPerformingType(objectInfo, type) {
-  objectInfo.performing = objectInfo.performing || TypeMapCreate();
-  TypeMapAddType(objectInfo.performing, type);
-  objectInfo.performingCount++;
-}
-
-
-function ObjectInfoRemovePerformingType(objectInfo, type) {
-  objectInfo.performingCount--;
-  TypeMapRemoveType(objectInfo.performing, type);
-}
-
-
-function ObjectInfoGetPerformingTypes(objectInfo) {
-  return objectInfo.performingCount > 0 ? objectInfo.performing : null;
-}
-
-
-function ConvertAcceptListToTypeMap(arg) {
-  // We use undefined as a sentinel for the default accept list.
-  if (IS_UNDEFINED(arg))
-    return arg;
-
-  if (!IS_RECEIVER(arg)) throw MakeTypeError(kObserveInvalidAccept);
-
-  var len = TO_INTEGER(arg.length);
-  if (len < 0) len = 0;
-
-  return TypeMapCreateFromList(arg, len);
-}
-
-
-// CallbackInfo's optimized state is just a number which represents its global
-// priority. When a change record must be enqueued for the callback, it
-// normalizes. When delivery clears any pending change records, it re-optimizes.
-function CallbackInfoGet(callback) {
-  return %WeakCollectionGet(GetObservationStateJS().callbackInfoMap, callback,
-                            GetHash(callback));
-}
-
-
-function CallbackInfoSet(callback, callbackInfo) {
-  %WeakCollectionSet(GetObservationStateJS().callbackInfoMap,
-                     callback, callbackInfo, GetHash(callback));
-}
-
-
-function CallbackInfoGetOrCreate(callback) {
-  var callbackInfo = CallbackInfoGet(callback);
-  if (!IS_UNDEFINED(callbackInfo))
-    return callbackInfo;
-
-  var priority = GetNextCallbackPriority();
-  CallbackInfoSet(callback, priority);
-  return priority;
-}
-
-
-function CallbackInfoGetPriority(callbackInfo) {
-  if (IS_NUMBER(callbackInfo))
-    return callbackInfo;
-  else
-    return callbackInfo.priority;
-}
-
-
-function CallbackInfoNormalize(callback) {
-  var callbackInfo = CallbackInfoGet(callback);
-  if (IS_NUMBER(callbackInfo)) {
-    var priority = callbackInfo;
-    callbackInfo = new InternalArray;
-    callbackInfo.priority = priority;
-    CallbackInfoSet(callback, callbackInfo);
-  }
-  return callbackInfo;
-}
-
-
-function ObjectObserve(object, callback, acceptList) {
-  if (!IS_RECEIVER(object))
-    throw MakeTypeError(kObserveNonObject, "observe", "observe");
-  if (%IsJSGlobalProxy(object))
-    throw MakeTypeError(kObserveGlobalProxy, "observe");
-  if (%IsAccessCheckNeeded(object))
-    throw MakeTypeError(kObserveAccessChecked, "observe");
-  if (!IS_CALLABLE(callback))
-    throw MakeTypeError(kObserveNonFunction, "observe");
-  if (%object_is_frozen(callback))
-    throw MakeTypeError(kObserveCallbackFrozen);
-
-  var objectObserveFn = %GetObjectContextObjectObserve(object);
-  return objectObserveFn(object, callback, acceptList);
-}
-
-
-function NativeObjectObserve(object, callback, acceptList) {
-  var objectInfo = ObjectInfoGetOrCreate(object);
-  var typeList = ConvertAcceptListToTypeMap(acceptList);
-  ObjectInfoAddObserver(objectInfo, callback, typeList);
-  return object;
-}
-
-
-function ObjectUnobserve(object, callback) {
-  if (!IS_RECEIVER(object))
-    throw MakeTypeError(kObserveNonObject, "unobserve", "unobserve");
-  if (%IsJSGlobalProxy(object))
-    throw MakeTypeError(kObserveGlobalProxy, "unobserve");
-  if (!IS_CALLABLE(callback))
-    throw MakeTypeError(kObserveNonFunction, "unobserve");
-
-  var objectInfo = ObjectInfoGet(object);
-  if (IS_UNDEFINED(objectInfo))
-    return object;
-
-  ObjectInfoRemoveObserver(objectInfo, callback);
-  return object;
-}
-
-
-function ArrayObserve(object, callback) {
-  return ObjectObserve(object, callback, ['add',
-                                          'update',
-                                          'delete',
-                                          'splice']);
-}
-
-
-function ArrayUnobserve(object, callback) {
-  return ObjectUnobserve(object, callback);
-}
-
-
-function ObserverEnqueueIfActive(observer, objectInfo, changeRecord) {
-  if (!ObserverIsActive(observer, objectInfo) ||
-      !TypeMapHasType(ObserverGetAcceptTypes(observer), changeRecord.type)) {
-    return;
-  }
-
-  var callback = ObserverGetCallback(observer);
-  if (!%ObserverObjectAndRecordHaveSameOrigin(callback, changeRecord.object,
-                                              changeRecord)) {
-    return;
-  }
-
-  var callbackInfo = CallbackInfoNormalize(callback);
-  if (IS_NULL(GetPendingObservers())) {
-    SetPendingObservers(nullProtoObject());
-    if (DEBUG_IS_ACTIVE) {
-      var id = ++GetObservationStateJS().lastMicrotaskId;
-      var name = "Object.observe";
-      %EnqueueMicrotask(function() {
-        %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
-        ObserveMicrotaskRunner();
-        %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
-      });
-      %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
-    } else {
-      %EnqueueMicrotask(ObserveMicrotaskRunner);
-    }
-  }
-  GetPendingObservers()[callbackInfo.priority] = callback;
-  callbackInfo.push(changeRecord);
-}
-
-
-function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
-  if (!ObjectInfoHasActiveObservers(objectInfo))
-    return;
-
-  var hasType = !IS_UNDEFINED(type);
-  var newRecord = hasType ?
-      { object: objectInfo.object, type: type } :
-      { object: objectInfo.object };
-
-  for (var prop in changeRecord) {
-    if (prop === 'object' || (hasType && prop === 'type')) continue;
-    %DefineDataPropertyUnchecked(
-        newRecord, prop, changeRecord[prop], READ_ONLY + DONT_DELETE);
-  }
-  %object_freeze(newRecord);
-
-  ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord);
-}
-
-
-function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord) {
-  // TODO(rossberg): adjust once there is a story for symbols vs proxies.
-  if (IS_SYMBOL(changeRecord.name)) return;
-
-  if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
-    var observer = objectInfo.changeObservers;
-    ObserverEnqueueIfActive(observer, objectInfo, changeRecord);
-    return;
-  }
-
-  for (var priority in objectInfo.changeObservers) {
-    var observer = objectInfo.changeObservers[priority];
-    if (IS_NULL(observer))
-      continue;
-    ObserverEnqueueIfActive(observer, objectInfo, changeRecord);
-  }
-}
-
-
-function BeginPerformSplice(array) {
-  var objectInfo = ObjectInfoGet(array);
-  if (!IS_UNDEFINED(objectInfo))
-    ObjectInfoAddPerformingType(objectInfo, 'splice');
-}
-
-
-function EndPerformSplice(array) {
-  var objectInfo = ObjectInfoGet(array);
-  if (!IS_UNDEFINED(objectInfo))
-    ObjectInfoRemovePerformingType(objectInfo, 'splice');
-}
-
-
-function EnqueueSpliceRecord(array, index, removed, addedCount) {
-  var objectInfo = ObjectInfoGet(array);
-  if (!ObjectInfoHasActiveObservers(objectInfo))
-    return;
-
-  var changeRecord = {
-    type: 'splice',
-    object: array,
-    index: index,
-    removed: removed,
-    addedCount: addedCount
-  };
-
-  %object_freeze(changeRecord);
-  %object_freeze(changeRecord.removed);
-  ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
-}
-
-
-function NotifyChange(type, object, name, oldValue) {
-  var objectInfo = ObjectInfoGet(object);
-  if (!ObjectInfoHasActiveObservers(objectInfo))
-    return;
-
-  var changeRecord;
-  if (arguments.length == 2) {
-    changeRecord = { type: type, object: object };
-  } else if (arguments.length == 3) {
-    changeRecord = { type: type, object: object, name: name };
-  } else {
-    changeRecord = {
-      type: type,
-      object: object,
-      name: name,
-      oldValue: oldValue
-    };
-  }
-
-  %object_freeze(changeRecord);
-  ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
-}
-
-
-function ObjectNotifierNotify(changeRecord) {
-  if (!IS_RECEIVER(this))
-    throw MakeTypeError(kCalledOnNonObject, "notify");
-
-  var objectInfo = ObjectInfoGetFromNotifier(this);
-  if (IS_UNDEFINED(objectInfo))
-    throw MakeTypeError(kObserveNotifyNonNotifier);
-  if (!IS_STRING(changeRecord.type))
-    throw MakeTypeError(kObserveTypeNonString);
-
-  ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord);
-}
-
-
-function ObjectNotifierPerformChange(changeType, changeFn) {
-  if (!IS_RECEIVER(this))
-    throw MakeTypeError(kCalledOnNonObject, "performChange");
-
-  var objectInfo = ObjectInfoGetFromNotifier(this);
-  if (IS_UNDEFINED(objectInfo))
-    throw MakeTypeError(kObserveNotifyNonNotifier);
-  if (!IS_STRING(changeType))
-    throw MakeTypeError(kObservePerformNonString);
-  if (!IS_CALLABLE(changeFn))
-    throw MakeTypeError(kObservePerformNonFunction);
-
-  var performChangeFn = %GetObjectContextNotifierPerformChange(objectInfo);
-  performChangeFn(objectInfo, changeType, changeFn);
-}
-
-
-function NativeObjectNotifierPerformChange(objectInfo, changeType, changeFn) {
-  ObjectInfoAddPerformingType(objectInfo, changeType);
-
-  var changeRecord;
-  try {
-    changeRecord = changeFn();
-  } finally {
-    ObjectInfoRemovePerformingType(objectInfo, changeType);
-  }
-
-  if (IS_RECEIVER(changeRecord))
-    ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, changeType);
-}
-
-
-function ObjectGetNotifier(object) {
-  if (!IS_RECEIVER(object))
-    throw MakeTypeError(kObserveNonObject, "getNotifier", "getNotifier");
-  if (%IsJSGlobalProxy(object))
-    throw MakeTypeError(kObserveGlobalProxy, "getNotifier");
-  if (%IsAccessCheckNeeded(object))
-    throw MakeTypeError(kObserveAccessChecked, "getNotifier");
-
-  if (%object_is_frozen(object)) return null;
-
-  if (!%ObjectWasCreatedInCurrentOrigin(object)) return null;
-
-  var getNotifierFn = %GetObjectContextObjectGetNotifier(object);
-  return getNotifierFn(object);
-}
-
-
-function NativeObjectGetNotifier(object) {
-  var objectInfo = ObjectInfoGetOrCreate(object);
-  return ObjectInfoGetNotifier(objectInfo);
-}
-
-
-function CallbackDeliverPending(callback) {
-  var callbackInfo = CallbackInfoGet(callback);
-  if (IS_UNDEFINED(callbackInfo) || IS_NUMBER(callbackInfo))
-    return false;
-
-  // Clear the pending change records from callback and return it to its
-  // "optimized" state.
-  var priority = callbackInfo.priority;
-  CallbackInfoSet(callback, priority);
-
-  var pendingObservers = GetPendingObservers();
-  if (!IS_NULL(pendingObservers))
-    delete pendingObservers[priority];
-
-  // TODO: combine the following runtime calls for perf optimization.
-  var delivered = [];
-  %MoveArrayContents(callbackInfo, delivered);
-  %DeliverObservationChangeRecords(callback, delivered);
-
-  return true;
-}
-
-
-function ObjectDeliverChangeRecords(callback) {
-  if (!IS_CALLABLE(callback))
-    throw MakeTypeError(kObserveNonFunction, "deliverChangeRecords");
-
-  while (CallbackDeliverPending(callback)) {}
-}
-
-
-function ObserveMicrotaskRunner() {
-  var pendingObservers = GetPendingObservers();
-  if (!IS_NULL(pendingObservers)) {
-    SetPendingObservers(null);
-    for (var i in pendingObservers) {
-      CallbackDeliverPending(pendingObservers[i]);
-    }
-  }
-}
-
-// -------------------------------------------------------------------
-
-utils.InstallFunctions(notifierPrototype, DONT_ENUM, [
-  "notify", ObjectNotifierNotify,
-  "performChange", ObjectNotifierPerformChange
-]);
-
-var ObserveObjectMethods = [
-  "deliverChangeRecords", ObjectDeliverChangeRecords,
-  "getNotifier", ObjectGetNotifier,
-  "observe", ObjectObserve,
-  "unobserve", ObjectUnobserve
-];
-
-var ObserveArrayMethods = [
-  "observe", ArrayObserve,
-  "unobserve", ArrayUnobserve
-];
-
-// TODO(adamk): Figure out why this prototype removal has to
-// happen as part of initial snapshotting.
-var removePrototypeFn = function(f, i) {
-  if (i % 2 === 1) %FunctionRemovePrototype(f);
-};
-ObserveObjectMethods.forEach(removePrototypeFn);
-ObserveArrayMethods.forEach(removePrototypeFn);
-
-%InstallToContext([
-  "native_object_get_notifier", NativeObjectGetNotifier,
-  "native_object_notifier_perform_change", NativeObjectNotifierPerformChange,
-  "native_object_observe", NativeObjectObserve,
-  "observers_begin_perform_splice", BeginPerformSplice,
-  "observers_end_perform_splice", EndPerformSplice,
-  "observers_enqueue_splice", EnqueueSpliceRecord,
-  "observers_notify_change", NotifyChange,
-]);
-
-utils.Export(function(to) {
-  to.ObserveArrayMethods = ObserveArrayMethods;
-  to.ObserveBeginPerformSplice = BeginPerformSplice;
-  to.ObserveEndPerformSplice = EndPerformSplice;
-  to.ObserveEnqueueSpliceRecord = EnqueueSpliceRecord;
-  to.ObserveObjectMethods = ObserveObjectMethods;
-});
-
-})
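Object.observe was withdrawn from TC39, and this deletion removes the whole
implementation together with its runtime hooks and exports. For reference, the
API surface being removed looked like this to user code:

    var obj = {}, arr = [];
    function onChange(changes) {
      // changes: an array of frozen records { object, type, name, oldValue }
    }
    Object.observe(obj, onChange, ['add', 'update']);  // optional accept list
    Object.unobserve(obj, onChange);
    Object.deliverChangeRecords(onChange);    // synchronous delivery
    Object.getNotifier(obj).notify({ type: 'custom' });  // type must be a string
    Array.observe(arr, onChange);             // default accept list plus 'splice'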
diff --git a/src/js/prologue.js b/src/js/prologue.js
index f9589a5..b352eb1 100644
--- a/src/js/prologue.js
+++ b/src/js/prologue.js
@@ -128,10 +128,10 @@
 
 function OverrideFunction(object, name, f, afterInitialBootstrap) {
   %CheckIsBootstrapping();
-  %ObjectDefineProperty(object, name, { value: f,
-                                        writeable: true,
-                                        configurable: true,
-                                        enumerable: false });
+  %object_define_property(object, name, { value: f,
+                                          writable: true,
+                                          configurable: true,
+                                          enumerable: false });
   SetFunctionName(f, name);
   if (!afterInitialBootstrap) %FunctionRemovePrototype(f);
   %SetNativeFlag(f);
@@ -181,10 +181,15 @@
 
   // Whitelist of exports from normal natives to experimental natives and debug.
   var expose_list = [
+    "AddBoundMethod",
     "ArrayToString",
+    "AsyncFunctionNext",
+    "AsyncFunctionThrow",
     "ErrorToString",
     "GetIterator",
     "GetMethod",
+    "IntlParseDate",
+    "IntlParseNumber",
     "IsNaN",
     "MakeError",
     "MakeRangeError",
@@ -195,12 +200,12 @@
     "MaxSimple",
     "MinSimple",
     "NumberIsInteger",
-    "ObjectDefineProperty",
-    "ObserveArrayMethods",
-    "ObserveObjectMethods",
     "PromiseChain",
-    "PromiseDeferred",
-    "PromiseResolved",
+    "PromiseDefer",
+    "PromiseAccept",
+    "PromiseCreateRejected",
+    "PromiseCreateResolved",
+    "PromiseThen",
     "RegExpSubclassExecJS",
     "RegExpSubclassMatch",
     "RegExpSubclassReplace",
@@ -211,12 +216,16 @@
     "SetIteratorNext",
     "SetValues",
     "SymbolToString",
+    "ToLocaleLowerCaseI18N",
+    "ToLocaleUpperCaseI18N",
+    "ToLowerCaseI18N",
     "ToPositiveInteger",
+    "ToUpperCaseI18N",
     // From runtime:
     "is_concat_spreadable_symbol",
     "iterator_symbol",
-    "promise_status_symbol",
-    "promise_value_symbol",
+    "promise_result_symbol",
+    "promise_state_symbol",
     "object_freeze",
     "object_is_frozen",
     "object_is_sealed",
diff --git a/src/js/promise-extra.js b/src/js/promise-extra.js
index f6f7959..34d7323 100644
--- a/src/js/promise-extra.js
+++ b/src/js/promise-extra.js
@@ -11,16 +11,16 @@
 var GlobalPromise = global.Promise;
 
 var PromiseChain = utils.ImportNow("PromiseChain");
-var PromiseDeferred = utils.ImportNow("PromiseDeferred");
-var PromiseResolved = utils.ImportNow("PromiseResolved");
+var PromiseDefer = utils.ImportNow("PromiseDefer");
+var PromiseAccept = utils.ImportNow("PromiseAccept");
 
 utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
   "chain", PromiseChain,
 ]);
 
 utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
-  "defer", PromiseDeferred,
-  "accept", PromiseResolved,
+  "defer", PromiseDefer,
+  "accept", PromiseAccept,
 ]);
 
 })
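Only the imported names change here; the legacy surface is intact.
Promise.defer() still returns a capability record and Promise.accept(x) still
behaves like Promise.resolve(x), with both counted by use counters ahead of
eventual removal:

    var d = Promise.defer();  // legacy: { promise, resolve, reject }
    d.resolve(42);
    d.promise.then(function(v) { console.log(v); });  // 42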
diff --git a/src/js/promise.js b/src/js/promise.js
index bcf826a..42b772b 100644
--- a/src/js/promise.js
+++ b/src/js/promise.js
@@ -17,12 +17,13 @@
     utils.ImportNow("promise_combined_deferred_symbol");
 var promiseHasHandlerSymbol =
     utils.ImportNow("promise_has_handler_symbol");
-var promiseOnRejectSymbol = utils.ImportNow("promise_on_reject_symbol");
-var promiseOnResolveSymbol =
-    utils.ImportNow("promise_on_resolve_symbol");
+var promiseRejectReactionsSymbol =
+    utils.ImportNow("promise_reject_reactions_symbol");
+var promiseFulfillReactionsSymbol =
+    utils.ImportNow("promise_fulfill_reactions_symbol");
 var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
-var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
-var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
+var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
 var SpeciesConstructor;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
@@ -33,22 +34,32 @@
 
 // -------------------------------------------------------------------
 
-// Status values: 0 = pending, +1 = resolved, -1 = rejected
+// [[PromiseState]] values:
+const kPending = 0;
+const kFulfilled = +1;
+const kRejected = -1;
+
 var lastMicrotaskId = 0;
 
+// ES#sec-createresolvingfunctions
+// CreateResolvingFunctions ( promise )
 function CreateResolvingFunctions(promise) {
   var alreadyResolved = false;
 
+  // ES#sec-promise-resolve-functions
+  // Promise Resolve Functions
   var resolve = value => {
     if (alreadyResolved === true) return;
     alreadyResolved = true;
-    PromiseResolve(promise, value);
+    FulfillPromise(promise, value);
   };
 
+  // ES#sec-promise-reject-functions
+  // Promise Reject Functions
   var reject = reason => {
     if (alreadyResolved === true) return;
     alreadyResolved = true;
-    PromiseReject(promise, reason);
+    RejectPromise(promise, reason);
   };
 
   return {
@@ -59,13 +70,16 @@
 }
 
 
+// ES#sec-promise-executor
+// Promise ( executor )
 var GlobalPromise = function Promise(resolver) {
   if (resolver === promiseRawSymbol) {
     return %_NewObject(GlobalPromise, new.target);
   }
   if (IS_UNDEFINED(new.target)) throw MakeTypeError(kNotAPromise, this);
-  if (!IS_CALLABLE(resolver))
+  if (!IS_CALLABLE(resolver)) {
     throw MakeTypeError(kResolverNotAFunction, resolver);
+  }
 
   var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
   var callbacks = CreateResolvingFunctions(promise);
@@ -85,27 +99,27 @@
 // Core functionality.
 
 function PromiseSet(promise, status, value, onResolve, onReject) {
-  SET_PRIVATE(promise, promiseStatusSymbol, status);
-  SET_PRIVATE(promise, promiseValueSymbol, value);
-  SET_PRIVATE(promise, promiseOnResolveSymbol, onResolve);
-  SET_PRIVATE(promise, promiseOnRejectSymbol, onReject);
+  SET_PRIVATE(promise, promiseStateSymbol, status);
+  SET_PRIVATE(promise, promiseResultSymbol, value);
+  SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
+  SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
   return promise;
 }
 
 function PromiseCreateAndSet(status, value) {
   var promise = new GlobalPromise(promiseRawSymbol);
   // If debug is active, notify about the newly created promise first.
-  if (DEBUG_IS_ACTIVE) PromiseSet(promise, 0, UNDEFINED);
+  if (DEBUG_IS_ACTIVE) PromiseSet(promise, kPending, UNDEFINED);
   return PromiseSet(promise, status, value);
 }
 
 function PromiseInit(promise) {
   return PromiseSet(
-      promise, 0, UNDEFINED, new InternalArray, new InternalArray)
+      promise, kPending, UNDEFINED, new InternalArray, new InternalArray)
 }
 
 function PromiseDone(promise, status, value, promiseQueue) {
-  if (GET_PRIVATE(promise, promiseStatusSymbol) === 0) {
+  if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
     var tasks = GET_PRIVATE(promise, promiseQueue);
     if (tasks.length) PromiseEnqueue(value, tasks, status);
     PromiseSet(promise, status, value);
@@ -139,7 +153,7 @@
   });
   if (instrumenting) {
     id = ++lastMicrotaskId;
-    name = status > 0 ? "Promise.resolve" : "Promise.reject";
+    name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
     %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
   }
 }
@@ -154,24 +168,27 @@
 
 // For bootstrapper.
 
+// ES#sec-ispromise IsPromise ( x )
 function IsPromise(x) {
-  return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStatusSymbol);
+  return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStateSymbol);
 }
 
 function PromiseCreate() {
   return new GlobalPromise(PromiseNopResolver)
 }
 
-function PromiseResolve(promise, x) {
+// ES#sec-fulfillpromise
+// FulfillPromise ( promise, value)
+function FulfillPromise(promise, x) {
   if (x === promise) {
-    return PromiseReject(promise, MakeTypeError(kPromiseCyclic, x));
+    return RejectPromise(promise, MakeTypeError(kPromiseCyclic, x));
   }
   if (IS_RECEIVER(x)) {
     // 25.4.1.3.2 steps 8-12
     try {
       var then = x.then;
     } catch (e) {
-      return PromiseReject(promise, e);
+      return RejectPromise(promise, e);
     }
     if (IS_CALLABLE(then)) {
       // PromiseResolveThenableJob
@@ -198,22 +215,26 @@
       return;
     }
   }
-  PromiseDone(promise, +1, x, promiseOnResolveSymbol);
+  PromiseDone(promise, kFulfilled, x, promiseFulfillReactionsSymbol);
 }
 
-function PromiseReject(promise, r) {
+// ES#sec-rejectpromise
+// RejectPromise ( promise, reason )
+function RejectPromise(promise, r) {
   // Check promise status to confirm that this reject has an effect.
   // Call runtime for callbacks to the debugger or for unhandled reject.
-  if (GET_PRIVATE(promise, promiseStatusSymbol) == 0) {
+  if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
     var debug_is_active = DEBUG_IS_ACTIVE;
     if (debug_is_active ||
         !HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
       %PromiseRejectEvent(promise, r, debug_is_active);
     }
   }
-  PromiseDone(promise, -1, r, promiseOnRejectSymbol)
+  PromiseDone(promise, kRejected, r, promiseRejectReactionsSymbol)
 }
 
+// ES#sec-newpromisecapability
+// NewPromiseCapability ( C )
 function NewPromiseCapability(C) {
   if (C === GlobalPromise) {
     // Optimized case, avoid extra closure.
@@ -240,23 +261,27 @@
   return result;
 }
 
-function PromiseDeferred() {
+// Unspecified V8-specific legacy function
+function PromiseDefer() {
   %IncrementUseCounter(kPromiseDefer);
   return NewPromiseCapability(this);
 }
 
-function PromiseResolved(x) {
+// Unspecified V8-specific legacy function
+function PromiseAccept(x) {
   %IncrementUseCounter(kPromiseAccept);
-  return %_Call(PromiseCast, this, x);
+  return %_Call(PromiseResolve, this, x);
 }
 
-function PromiseRejected(r) {
+// ES#sec-promise.reject
+// Promise.reject ( x )
+function PromiseReject(r) {
   if (!IS_RECEIVER(this)) {
-    throw MakeTypeError(kCalledOnNonObject, PromiseRejected);
+    throw MakeTypeError(kCalledOnNonObject, PromiseReject);
   }
   if (this === GlobalPromise) {
     // Optimized case, avoid extra closure.
-    var promise = PromiseCreateAndSet(-1, r);
+    var promise = PromiseCreateAndSet(kRejected, r);
     // The debug event for this would always be an uncaught promise reject,
     // which is usually simply noise. Do not trigger that debug event.
     %PromiseRejectEvent(promise, r, false);
@@ -268,10 +293,21 @@
   }
 }
 
-// Multi-unwrapped chaining with thenable coercion.
+// Shortcut Promise.reject and Promise.resolve() implementations, used by the
+// Async Functions implementation.
+function PromiseCreateRejected(r) {
+  return %_Call(PromiseReject, GlobalPromise, r);
+}
 
+function PromiseCreateResolved(x) {
+  return %_Call(PromiseResolve, GlobalPromise, x);
+}
+
+// ES#sec-promise.prototype.then
+// Promise.prototype.then ( onFulfilled, onRejected )
+// Multi-unwrapped chaining with thenable coercion.
 function PromiseThen(onResolve, onReject) {
-  var status = GET_PRIVATE(this, promiseStatusSymbol);
+  var status = GET_PRIVATE(this, promiseStateSymbol);
   if (IS_UNDEFINED(status)) {
     throw MakeTypeError(kNotAPromise, this);
   }
@@ -281,24 +317,25 @@
   onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
   var deferred = NewPromiseCapability(constructor);
   switch (status) {
-    case 0:  // Pending
-      GET_PRIVATE(this, promiseOnResolveSymbol).push(onResolve, deferred);
-      GET_PRIVATE(this, promiseOnRejectSymbol).push(onReject, deferred);
+    case kPending:
+      GET_PRIVATE(this, promiseFulfillReactionsSymbol).push(onResolve,
+                                                            deferred);
+      GET_PRIVATE(this, promiseRejectReactionsSymbol).push(onReject, deferred);
       break;
-    case +1:  // Resolved
-      PromiseEnqueue(GET_PRIVATE(this, promiseValueSymbol),
+    case kFulfilled:
+      PromiseEnqueue(GET_PRIVATE(this, promiseResultSymbol),
                      [onResolve, deferred],
-                     +1);
+                     kFulfilled);
       break;
-    case -1:  // Rejected
+    case kRejected:
       if (!HAS_DEFINED_PRIVATE(this, promiseHasHandlerSymbol)) {
         // Promise has already been rejected, but had no handler.
         // Revoke previously triggered reject event.
         %PromiseRevokeReject(this);
       }
-      PromiseEnqueue(GET_PRIVATE(this, promiseValueSymbol),
+      PromiseEnqueue(GET_PRIVATE(this, promiseResultSymbol),
                      [onReject, deferred],
-                     -1);
+                     kRejected);
       break;
   }
   // Mark this promise as having handler.
@@ -306,21 +343,26 @@
   return deferred.promise;
 }
 
+// Unspecified V8-specific legacy function
 // Chain is left around for now as an alias for then
 function PromiseChain(onResolve, onReject) {
   %IncrementUseCounter(kPromiseChain);
   return %_Call(PromiseThen, this, onResolve, onReject);
 }
 
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
 function PromiseCatch(onReject) {
   return this.then(UNDEFINED, onReject);
 }
 
 // Combinators.
 
-function PromiseCast(x) {
+// ES#sec-promise.resolve
+// Promise.resolve ( x )
+function PromiseResolve(x) {
   if (!IS_RECEIVER(this)) {
-    throw MakeTypeError(kCalledOnNonObject, PromiseCast);
+    throw MakeTypeError(kCalledOnNonObject, PromiseResolve);
   }
   if (IsPromise(x) && x.constructor === this) return x;
 
@@ -329,6 +371,8 @@
   return promiseCapability.promise;
 }
 
+// ES#sec-promise.all
+// Promise.all ( iterable )
 function PromiseAll(iterable) {
   if (!IS_RECEIVER(this)) {
     throw MakeTypeError(kCalledOnNonObject, "Promise.all");
@@ -378,6 +422,8 @@
   return deferred.promise;
 }
 
+// ES#sec-promise.race
+// Promise.race ( iterable )
 function PromiseRace(iterable) {
   if (!IS_RECEIVER(this)) {
     throw MakeTypeError(kCalledOnNonObject, PromiseRace);
@@ -399,7 +445,7 @@
 // Utility for debugger
 
 function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
-  var queue = GET_PRIVATE(promise, promiseOnRejectSymbol);
+  var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
   if (IS_UNDEFINED(queue)) return false;
   for (var i = 0; i < queue.length; i += 2) {
     var handler = queue[i];
@@ -432,10 +478,10 @@
                   DONT_ENUM | READ_ONLY);
 
 utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
-  "reject", PromiseRejected,
+  "reject", PromiseReject,
   "all", PromiseAll,
   "race", PromiseRace,
-  "resolve", PromiseCast
+  "resolve", PromiseResolve
 ]);
 
 utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
@@ -448,9 +494,11 @@
   "promise_chain", PromiseChain,
   "promise_create", PromiseCreate,
   "promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
-  "promise_reject", PromiseReject,
-  "promise_resolve", PromiseResolve,
+  "promise_reject", RejectPromise,
+  "promise_resolve", FulfillPromise,
   "promise_then", PromiseThen,
+  "promise_create_rejected", PromiseCreateRejected,
+  "promise_create_resolved", PromiseCreateResolved
 ]);
 
 // This allows extras to create promises quickly without building extra
@@ -458,18 +506,22 @@
 // promise without having to hold on to those closures forever.
 utils.InstallFunctions(extrasUtils, 0, [
   "createPromise", PromiseCreate,
-  "resolvePromise", PromiseResolve,
-  "rejectPromise", PromiseReject
+  "resolvePromise", FulfillPromise,
+  "rejectPromise", RejectPromise
 ]);
 
 // TODO(v8:4567): Allow experimental natives to remove function prototype
-[PromiseChain, PromiseDeferred, PromiseResolved].forEach(
+[PromiseChain, PromiseDefer, PromiseAccept].forEach(
     fn => %FunctionRemovePrototype(fn));
 
 utils.Export(function(to) {
   to.PromiseChain = PromiseChain;
-  to.PromiseDeferred = PromiseDeferred;
-  to.PromiseResolved = PromiseResolved;
+  to.PromiseDefer = PromiseDefer;
+  to.PromiseAccept = PromiseAccept;
+
+  to.PromiseCreateRejected = PromiseCreateRejected;
+  to.PromiseCreateResolved = PromiseCreateResolved;
+  to.PromiseThen = PromiseThen;
 });
 
 })
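The bulk of the promise.js diff is a mechanical rename toward the spec's
terminology ([[PromiseState]]/[[PromiseResult]], FulfillPromise/RejectPromise)
plus named state constants; the state machine itself is unchanged:

    // kPending (0)  --FulfillPromise-->  kFulfilled (+1), result = value
    // kPending (0)  --RejectPromise -->  kRejected  (-1), result = reason
    // A settled promise never transitions again: the alreadyResolved guard in
    // CreateResolvingFunctions makes the resolve/reject pair one-shot.
    var p = new Promise(function(resolve, reject) {
      resolve(1);
      reject(new Error('ignored'));  // no effect: already resolved
    });
    p.then(function(v) { console.log(v); });  // 1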
diff --git a/src/js/regexp.js b/src/js/regexp.js
index cc8cb41..719a081 100644
--- a/src/js/regexp.js
+++ b/src/js/regexp.js
@@ -4,14 +4,11 @@
 
 (function(global, utils) {
 
-'use strict';
-
 %CheckIsBootstrapping();
 
 // -------------------------------------------------------------------
 // Imports
 
-var AddIndexedProperty;
 var ExpandReplacement;
 var GlobalArray = global.Array;
 var GlobalObject = global.Object;
@@ -29,7 +26,6 @@
 var SpeciesConstructor;
 
 utils.Import(function(from) {
-  AddIndexedProperty = from.AddIndexedProperty;
   ExpandReplacement = from.ExpandReplacement;
   MakeTypeError = from.MakeTypeError;
   MaxSimple = from.MaxSimple;
@@ -502,7 +498,7 @@
   var result;
   if (size === 0) {
     result = RegExpSubclassExec(splitter, string);
-    if (IS_NULL(result)) AddIndexedProperty(array, 0, string);
+    if (IS_NULL(result)) %AddElement(array, 0, string);
     return array;
   }
   var stringIndex = prevStringIndex;
@@ -515,10 +511,10 @@
       stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
     } else {
       var end = MinSimple(TO_LENGTH(splitter.lastIndex), size);
-      if (end === stringIndex) {
+      if (end === prevStringIndex) {
         stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
       } else {
-        AddIndexedProperty(
+        %AddElement(
             array, arrayIndex,
             %_SubString(string, prevStringIndex, stringIndex));
         arrayIndex++;
@@ -526,7 +522,7 @@
         prevStringIndex = end;
         var numberOfCaptures = MaxSimple(TO_LENGTH(result.length), 0);
         for (var i = 1; i < numberOfCaptures; i++) {
-          AddIndexedProperty(array, arrayIndex, result[i]);
+          %AddElement(array, arrayIndex, result[i]);
           arrayIndex++;
           if (arrayIndex === lim) return array;
         }
@@ -534,7 +530,7 @@
       }
     }
   }
-  AddIndexedProperty(array, arrayIndex,
+  %AddElement(array, arrayIndex,
                      %_SubString(string, prevStringIndex, size));
   return array;
 }
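Besides switching to direct %AddElement calls (presumably safe here because
@@split's array indices are bounded by the uint32 split limit), this hunk fixes
the empty-match check: the spec compares the match end e against p
(prevStringIndex here), not q (stringIndex), when deciding whether to advance
past a match that consumed nothing. The empty-match semantics this implements
(ES2015 21.2.5.11):

    'abc'.split(/(?:)/);  // ['a', 'b', 'c']: empty matches advance the cursor
    'ab'.split(/a*/);     // ['', 'b']: the chunk before a match is kept even
                          // when it is empty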
diff --git a/src/js/runtime.js b/src/js/runtime.js
index 8e4f283..a6a0b4d 100644
--- a/src/js/runtime.js
+++ b/src/js/runtime.js
@@ -43,19 +43,6 @@
 */
 
 
-// This function should be called rather than %AddElement in contexts where the
-// argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
-// this is a concern at basically all callsites.
-function AddIndexedProperty(obj, index, value) {
-  if (index === TO_UINT32(index) && index !== kMaxUint32) {
-    %AddElement(obj, index, value);
-  } else {
-    %AddNamedProperty(obj, TO_STRING(index), value, NONE);
-  }
-}
-%SetForceInlineFlag(AddIndexedProperty);
-
-
 function ToPositiveInteger(x, rangeErrorIndex) {
   var i = TO_INTEGER_MAP_MINUS_ZERO(x);
   if (i < 0) throw MakeRangeError(rangeErrorIndex);
@@ -122,7 +109,6 @@
 // Exports
 
 utils.Export(function(to) {
-  to.AddIndexedProperty = AddIndexedProperty;
   to.MaxSimple = MaxSimple;
   to.MinSimple = MinSimple;
   to.ToPositiveInteger = ToPositiveInteger;
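AddIndexedProperty existed because a valid array index tops out at 2**32 - 2,
while ES2015's ToLength admits indices up to 2**53 - 1; anything at or above
2**32 - 1 has to be added as a named property instead. Its removal implies the
remaining %AddElement callers only ever see in-range indices. The boundary it
guarded:

    var a = [];
    a[4294967294] = 'x';  // 2**32 - 2: the largest valid array index
    a.length;             // 4294967295
    a[4294967295] = 'y';  // 2**32 - 1: stored as a plain string-keyed property
    a.length;             // still 4294967295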
diff --git a/src/js/string.js b/src/js/string.js
index 0eb394e..badb2b4 100644
--- a/src/js/string.js
+++ b/src/js/string.js
@@ -57,30 +57,6 @@
 }
 
 
-// ECMA-262, section 15.5.4.4
-function StringCharAtJS(pos) {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.charAt");
-
-  var result = %_StringCharAt(this, pos);
-  if (%_IsSmi(result)) {
-    result = %_StringCharAt(TO_STRING(this), TO_INTEGER(pos));
-  }
-  return result;
-}
-
-
-// ECMA-262 section 15.5.4.5
-function StringCharCodeAtJS(pos) {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.charCodeAt");
-
-  var result = %_StringCharCodeAt(this, pos);
-  if (!%_IsSmi(result)) {
-    result = %_StringCharCodeAt(TO_STRING(this), TO_INTEGER(pos));
-  }
-  return result;
-}
-
-
 // ECMA-262, section 15.5.4.6
 function StringConcat(other /* and more */) {  // length == 1
   "use strict";
@@ -845,13 +821,6 @@
 
 // -------------------------------------------------------------------
 
-// Set the String function and constructor.
-%FunctionSetPrototype(GlobalString, new GlobalString());
-
-// Set up the constructor property on the String prototype object.
-%AddNamedProperty(
-    GlobalString.prototype, "constructor", GlobalString, DONT_ENUM);
-
 // Set up the non-enumerable functions on the String object.
 utils.InstallFunctions(GlobalString, DONT_ENUM, [
   "fromCodePoint", StringFromCodePoint,
@@ -862,8 +831,6 @@
 utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
   "valueOf", StringValueOf,
   "toString", StringToString,
-  "charAt", StringCharAtJS,
-  "charCodeAt", StringCharCodeAtJS,
   "codePointAt", StringCodePointAt,
   "concat", StringConcat,
   "endsWith", StringEndsWith,
@@ -909,7 +876,6 @@
 
 utils.Export(function(to) {
   to.ExpandReplacement = ExpandReplacement;
-  to.StringCharAt = StringCharAtJS;
   to.StringIndexOf = StringIndexOf;
   to.StringLastIndexOf = StringLastIndexOf;
   to.StringMatch = StringMatchJS;
diff --git a/src/js/typedarray.js b/src/js/typedarray.js
index 4fb174b..18f6dde 100644
--- a/src/js/typedarray.js
+++ b/src/js/typedarray.js
@@ -11,7 +11,6 @@
 // -------------------------------------------------------------------
 // Imports
 
-var AddIndexedProperty;
 // array.js has to come before typedarray.js for this to work
 var ArrayToString = utils.ImportNow("ArrayToString");
 var ArrayValues;
@@ -22,7 +21,6 @@
 var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
 var GlobalDataView = global.DataView;
 var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
 var InnerArrayCopyWithin;
 var InnerArrayEvery;
 var InnerArrayFill;
@@ -71,7 +69,6 @@
 TYPED_ARRAYS(DECLARE_GLOBALS)
 
 utils.Import(function(from) {
-  AddIndexedProperty = from.AddIndexedProperty;
   ArrayValues = from.ArrayValues;
   GetIterator = from.GetIterator;
   GetMethod = from.GetMethod;
diff --git a/src/js/uri.js b/src/js/uri.js
index dca83c9..19bfbd3 100644
--- a/src/js/uri.js
+++ b/src/js/uri.js
@@ -37,72 +37,6 @@
   return -1;
 }
 
-// Does the char code correspond to an alpha-numeric char.
-function isAlphaNumeric(cc) {
-  // a - z
-  if (97 <= cc && cc <= 122) return true;
-  // A - Z
-  if (65 <= cc && cc <= 90) return true;
-  // 0 - 9
-  if (48 <= cc && cc <= 57) return true;
-
-  return false;
-}
-
-// Lazily initialized.
-var hexCharCodeArray = 0;
-
-function URIAddEncodedOctetToBuffer(octet, result, index) {
-  result[index++] = 37; // Char code of '%'.
-  result[index++] = hexCharCodeArray[octet >> 4];
-  result[index++] = hexCharCodeArray[octet & 0x0F];
-  return index;
-}
-
-function URIEncodeOctets(octets, result, index) {
-  if (hexCharCodeArray === 0) {
-    hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
-                        65, 66, 67, 68, 69, 70];
-  }
-  index = URIAddEncodedOctetToBuffer(octets[0], result, index);
-  if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
-  if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
-  if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
-  return index;
-}
-
-function URIEncodeSingle(cc, result, index) {
-  var x = (cc >> 12) & 0xF;
-  var y = (cc >> 6) & 63;
-  var z = cc & 63;
-  var octets = new InternalArray(3);
-  if (cc <= 0x007F) {
-    octets[0] = cc;
-  } else if (cc <= 0x07FF) {
-    octets[0] = y + 192;
-    octets[1] = z + 128;
-  } else {
-    octets[0] = x + 224;
-    octets[1] = y + 128;
-    octets[2] = z + 128;
-  }
-  return URIEncodeOctets(octets, result, index);
-}
-
-function URIEncodePair(cc1 , cc2, result, index) {
-  var u = ((cc1 >> 6) & 0xF) + 1;
-  var w = (cc1 >> 2) & 0xF;
-  var x = cc1 & 3;
-  var y = (cc2 >> 6) & 0xF;
-  var z = cc2 & 63;
-  var octets = new InternalArray(4);
-  octets[0] = (u >> 2) + 240;
-  octets[1] = (((u & 3) << 4) | w) + 128;
-  octets[2] = ((x << 4) | y) + 128;
-  octets[3] = z + 128;
-  return URIEncodeOctets(octets, result, index);
-}
-
 function URIHexCharsToCharCode(highChar, lowChar) {
   var highCode = HexValueOf(highChar);
   var lowCode = HexValueOf(lowChar);
@@ -168,37 +102,6 @@
 }
 
 // ECMA-262, section 15.1.3
-function Encode(uri, unescape) {
-  uri = TO_STRING(uri);
-  var uriLength = uri.length;
-  var array = new InternalArray(uriLength);
-  var index = 0;
-  for (var k = 0; k < uriLength; k++) {
-    var cc1 = %_StringCharCodeAt(uri, k);
-    if (unescape(cc1)) {
-      array[index++] = cc1;
-    } else {
-      if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw MakeURIError();
-      if (cc1 < 0xD800 || cc1 > 0xDBFF) {
-        index = URIEncodeSingle(cc1, array, index);
-      } else {
-        k++;
-        if (k == uriLength) throw MakeURIError();
-        var cc2 = %_StringCharCodeAt(uri, k);
-        if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw MakeURIError();
-        index = URIEncodePair(cc1, cc2, array, index);
-      }
-    }
-  }
-
-  var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
-  for (var i = 0; i < array.length; i++) {
-    %_OneByteSeqStringSetChar(i, array[i], result);
-  }
-  return result;
-}
-
-// ECMA-262, section 15.1.3
 function Decode(uri, reserved) {
   uri = TO_STRING(uri);
   var uriLength = uri.length;
@@ -316,52 +219,6 @@
   return Decode(component, reservedPredicate);
 }
 
-// ECMA-262 - 15.1.3.3.
-function URIEncode(uri) {
-  var unescapePredicate = function(cc) {
-    if (isAlphaNumeric(cc)) return true;
-    // !
-    if (cc == 33) return true;
-    // #$
-    if (35 <= cc && cc <= 36) return true;
-    // &'()*+,-./
-    if (38 <= cc && cc <= 47) return true;
-    // :;
-    if (58 <= cc && cc <= 59) return true;
-    // =
-    if (cc == 61) return true;
-    // ?@
-    if (63 <= cc && cc <= 64) return true;
-    // _
-    if (cc == 95) return true;
-    // ~
-    if (cc == 126) return true;
-
-    return false;
-  };
-  return Encode(uri, unescapePredicate);
-}
-
-// ECMA-262 - 15.1.3.4
-function URIEncodeComponent(component) {
-  var unescapePredicate = function(cc) {
-    if (isAlphaNumeric(cc)) return true;
-    // !
-    if (cc == 33) return true;
-    // '()*
-    if (39 <= cc && cc <= 42) return true;
-    // -.
-    if (45 <= cc && cc <= 46) return true;
-    // _
-    if (cc == 95) return true;
-    // ~
-    if (cc == 126) return true;
-
-    return false;
-  };
-  return Encode(component, unescapePredicate);
-}
-
 // -------------------------------------------------------------------
 // Install exported functions.
 
@@ -371,9 +228,7 @@
   "escape", URIEscapeJS,
   "unescape", URIUnescapeJS,
   "decodeURI", URIDecode,
-  "decodeURIComponent", URIDecodeComponent,
-  "encodeURI", URIEncode,
-  "encodeURIComponent", URIEncodeComponent
+  "decodeURIComponent", URIDecodeComponent
 ]);
 
 })
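
The removed Encode/URIEncode/URIEncodeComponent paths implemented the
ECMA-262 15.1.3 UTF-8 percent-encoding, including the URIError thrown on lone
surrogates in Encode above. Their JavaScript-observable behavior, which any
replacement must match, illustration only:

  encodeURIComponent("a b");     // "a%20b"     (space escapes to one octet)
  encodeURI("?x=1");             // "?x=1"      (reserved chars pass through encodeURI)
  encodeURIComponent("?x=1");    // "%3Fx%3D1"  (but not encodeURIComponent)
  encodeURIComponent("\u20AC");  // "%E2%82%AC" (multi-byte UTF-8, cf. URIEncodeSingle)
  encodeURIComponent("\uD800");  // throws URIError (lone surrogate, cf. Encode)
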
diff --git a/src/js/v8natives.js b/src/js/v8natives.js
index 5185c62..44be941 100644
--- a/src/js/v8natives.js
+++ b/src/js/v8natives.js
@@ -20,9 +20,6 @@
 var MathAbs;
 var NaN = %GetRootNaN();
 var ObjectToString = utils.ImportNow("object_to_string");
-var ObserveBeginPerformSplice;
-var ObserveEndPerformSplice;
-var ObserveEnqueueSpliceRecord;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
@@ -30,9 +27,6 @@
   MakeSyntaxError = from.MakeSyntaxError;
   MakeTypeError = from.MakeTypeError;
   MathAbs = from.MathAbs;
-  ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
-  ObserveEndPerformSplice = from.ObserveEndPerformSplice;
-  ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
 });
 
 // ----------------------------------------------------------------------------
@@ -148,348 +142,6 @@
   return %PropertyIsEnumerable(TO_OBJECT(this), P);
 }
 
-
-// Extensions for providing property getters and setters.
-function ObjectDefineGetter(name, fun) {
-  var receiver = this;
-  if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
-    receiver = %GlobalProxy(ObjectDefineGetter);
-  }
-  if (!IS_CALLABLE(fun)) {
-    throw MakeTypeError(kObjectGetterExpectingFunction);
-  }
-  var desc = new PropertyDescriptor();
-  desc.setGet(fun);
-  desc.setEnumerable(true);
-  desc.setConfigurable(true);
-  DefineOwnProperty(TO_OBJECT(receiver), TO_NAME(name), desc, false);
-}
-
-
-function ObjectLookupGetter(name) {
-  var receiver = this;
-  if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
-    receiver = %GlobalProxy(ObjectLookupGetter);
-  }
-  return %LookupAccessor(TO_OBJECT(receiver), TO_NAME(name), GETTER);
-}
-
-
-function ObjectDefineSetter(name, fun) {
-  var receiver = this;
-  if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
-    receiver = %GlobalProxy(ObjectDefineSetter);
-  }
-  if (!IS_CALLABLE(fun)) {
-    throw MakeTypeError(kObjectSetterExpectingFunction);
-  }
-  var desc = new PropertyDescriptor();
-  desc.setSet(fun);
-  desc.setEnumerable(true);
-  desc.setConfigurable(true);
-  DefineOwnProperty(TO_OBJECT(receiver), TO_NAME(name), desc, false);
-}
-
-
-function ObjectLookupSetter(name) {
-  var receiver = this;
-  if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
-    receiver = %GlobalProxy(ObjectLookupSetter);
-  }
-  return %LookupAccessor(TO_OBJECT(receiver), TO_NAME(name), SETTER);
-}
-
-
-// ES6 6.2.4.1
-function IsAccessorDescriptor(desc) {
-  if (IS_UNDEFINED(desc)) return false;
-  return desc.hasGetter() || desc.hasSetter();
-}
-
-
-// ES6 6.2.4.2
-function IsDataDescriptor(desc) {
-  if (IS_UNDEFINED(desc)) return false;
-  return desc.hasValue() || desc.hasWritable();
-}
-
-
-// ES6 6.2.4.3
-function IsGenericDescriptor(desc) {
-  if (IS_UNDEFINED(desc)) return false;
-  return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
-}
-
-
-function IsInconsistentDescriptor(desc) {
-  return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
-}
-
-
-// Harmony Proxies
-function FromGenericPropertyDescriptor(desc) {
-  if (IS_UNDEFINED(desc)) return desc;
-  var obj = new GlobalObject();
-
-  if (desc.hasValue()) {
-    %AddNamedProperty(obj, "value", desc.getValue(), NONE);
-  }
-  if (desc.hasWritable()) {
-    %AddNamedProperty(obj, "writable", desc.isWritable(), NONE);
-  }
-  if (desc.hasGetter()) {
-    %AddNamedProperty(obj, "get", desc.getGet(), NONE);
-  }
-  if (desc.hasSetter()) {
-    %AddNamedProperty(obj, "set", desc.getSet(), NONE);
-  }
-  if (desc.hasEnumerable()) {
-    %AddNamedProperty(obj, "enumerable", desc.isEnumerable(), NONE);
-  }
-  if (desc.hasConfigurable()) {
-    %AddNamedProperty(obj, "configurable", desc.isConfigurable(), NONE);
-  }
-  return obj;
-}
-
-
-// ES6 6.2.4.5
-function ToPropertyDescriptor(obj) {
-  if (!IS_RECEIVER(obj)) throw MakeTypeError(kPropertyDescObject, obj);
-
-  var desc = new PropertyDescriptor();
-
-  if ("enumerable" in obj) {
-    desc.setEnumerable(TO_BOOLEAN(obj.enumerable));
-  }
-
-  if ("configurable" in obj) {
-    desc.setConfigurable(TO_BOOLEAN(obj.configurable));
-  }
-
-  if ("value" in obj) {
-    desc.setValue(obj.value);
-  }
-
-  if ("writable" in obj) {
-    desc.setWritable(TO_BOOLEAN(obj.writable));
-  }
-
-  if ("get" in obj) {
-    var get = obj.get;
-    if (!IS_UNDEFINED(get) && !IS_CALLABLE(get)) {
-      throw MakeTypeError(kObjectGetterCallable, get);
-    }
-    desc.setGet(get);
-  }
-
-  if ("set" in obj) {
-    var set = obj.set;
-    if (!IS_UNDEFINED(set) && !IS_CALLABLE(set)) {
-      throw MakeTypeError(kObjectSetterCallable, set);
-    }
-    desc.setSet(set);
-  }
-
-  if (IsInconsistentDescriptor(desc)) {
-    throw MakeTypeError(kValueAndAccessor, obj);
-  }
-  return desc;
-}
-
-// TODO(cbruni): remove once callers have been removed
-function ToCompletePropertyDescriptor(obj) {
-  var desc = ToPropertyDescriptor(obj);
-  if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
-    if (!desc.hasValue()) desc.setValue(UNDEFINED);
-    if (!desc.hasWritable()) desc.setWritable(false);
-  } else {
-    // Is accessor descriptor.
-    if (!desc.hasGetter()) desc.setGet(UNDEFINED);
-    if (!desc.hasSetter()) desc.setSet(UNDEFINED);
-  }
-  if (!desc.hasEnumerable()) desc.setEnumerable(false);
-  if (!desc.hasConfigurable()) desc.setConfigurable(false);
-  return desc;
-}
-
-
-function PropertyDescriptor() {
-  // Initialize here so they are all in-object and have the same map.
-  // Default values from ES5 8.6.1.
-  this.value_ = UNDEFINED;
-  this.hasValue_ = false;
-  this.writable_ = false;
-  this.hasWritable_ = false;
-  this.enumerable_ = false;
-  this.hasEnumerable_ = false;
-  this.configurable_ = false;
-  this.hasConfigurable_ = false;
-  this.get_ = UNDEFINED;
-  this.hasGetter_ = false;
-  this.set_ = UNDEFINED;
-  this.hasSetter_ = false;
-}
-
-utils.SetUpLockedPrototype(PropertyDescriptor, [
-  "value_",
-  "hasValue_",
-  "writable_",
-  "hasWritable_",
-  "enumerable_",
-  "hasEnumerable_",
-  "configurable_",
-  "hasConfigurable_",
-  "get_",
-  "hasGetter_",
-  "set_",
-  "hasSetter_"
-], [
-  "toString", function PropertyDescriptor_ToString() {
-    return "[object PropertyDescriptor]";
-  },
-  "setValue", function PropertyDescriptor_SetValue(value) {
-    this.value_ = value;
-    this.hasValue_ = true;
-  },
-  "getValue", function PropertyDescriptor_GetValue() {
-    return this.value_;
-  },
-  "hasValue", function PropertyDescriptor_HasValue() {
-    return this.hasValue_;
-  },
-  "setEnumerable", function PropertyDescriptor_SetEnumerable(enumerable) {
-    this.enumerable_ = enumerable;
-      this.hasEnumerable_ = true;
-  },
-  "isEnumerable", function PropertyDescriptor_IsEnumerable() {
-    return this.enumerable_;
-  },
-  "hasEnumerable", function PropertyDescriptor_HasEnumerable() {
-    return this.hasEnumerable_;
-  },
-  "setWritable", function PropertyDescriptor_SetWritable(writable) {
-    this.writable_ = writable;
-    this.hasWritable_ = true;
-  },
-  "isWritable", function PropertyDescriptor_IsWritable() {
-    return this.writable_;
-  },
-  "hasWritable", function PropertyDescriptor_HasWritable() {
-    return this.hasWritable_;
-  },
-  "setConfigurable",
-  function PropertyDescriptor_SetConfigurable(configurable) {
-    this.configurable_ = configurable;
-    this.hasConfigurable_ = true;
-  },
-  "hasConfigurable", function PropertyDescriptor_HasConfigurable() {
-    return this.hasConfigurable_;
-  },
-  "isConfigurable", function PropertyDescriptor_IsConfigurable() {
-    return this.configurable_;
-  },
-  "setGet", function PropertyDescriptor_SetGetter(get) {
-    this.get_ = get;
-    this.hasGetter_ = true;
-  },
-  "getGet", function PropertyDescriptor_GetGetter() {
-    return this.get_;
-  },
-  "hasGetter", function PropertyDescriptor_HasGetter() {
-    return this.hasGetter_;
-  },
-  "setSet", function PropertyDescriptor_SetSetter(set) {
-    this.set_ = set;
-    this.hasSetter_ = true;
-  },
-  "getSet", function PropertyDescriptor_GetSetter() {
-    return this.set_;
-  },
-  "hasSetter", function PropertyDescriptor_HasSetter() {
-    return this.hasSetter_;
-  }
-]);
-
-
-// Converts an array returned from Runtime_GetOwnProperty to an actual
-// property descriptor. For a description of the array layout please
-// see the runtime.cc file.
-function ConvertDescriptorArrayToDescriptor(desc_array) {
-  if (IS_UNDEFINED(desc_array)) {
-    return UNDEFINED;
-  }
-
-  var desc = new PropertyDescriptor();
-  // This is an accessor.
-  if (desc_array[IS_ACCESSOR_INDEX]) {
-    desc.setGet(desc_array[GETTER_INDEX]);
-    desc.setSet(desc_array[SETTER_INDEX]);
-  } else {
-    desc.setValue(desc_array[VALUE_INDEX]);
-    desc.setWritable(desc_array[WRITABLE_INDEX]);
-  }
-  desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
-  desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
-
-  return desc;
-}
-
-
-// For Harmony proxies.
-function GetTrap(handler, name, defaultTrap) {
-  var trap = handler[name];
-  if (IS_UNDEFINED(trap)) {
-    if (IS_UNDEFINED(defaultTrap)) {
-      throw MakeTypeError(kIllegalInvocation);
-    }
-    trap = defaultTrap;
-  } else if (!IS_CALLABLE(trap)) {
-    throw MakeTypeError(kIllegalInvocation);
-  }
-  return trap;
-}
-
-
-function CallTrap1(handler, name, defaultTrap, x) {
-  return %_Call(GetTrap(handler, name, defaultTrap), handler, x);
-}
-
-
-function CallTrap2(handler, name, defaultTrap, x, y) {
-  return %_Call(GetTrap(handler, name, defaultTrap), handler, x, y);
-}
-
-
-// ES5 section 8.12.1.
-// TODO(jkummerow): Deprecated. Migrate all callers to
-// ObjectGetOwnPropertyDescriptor and delete this.
-function GetOwnPropertyJS(obj, v) {
-  var p = TO_NAME(v);
-  if (IS_PROXY(obj)) {
-    // TODO(rossberg): adjust once there is a story for symbols vs proxies.
-    if (IS_SYMBOL(v)) return UNDEFINED;
-
-    var handler = %JSProxyGetHandler(obj);
-    var descriptor = CallTrap1(
-                         handler, "getOwnPropertyDescriptor", UNDEFINED, p);
-    if (IS_UNDEFINED(descriptor)) return descriptor;
-    var desc = ToCompletePropertyDescriptor(descriptor);
-    if (!desc.isConfigurable()) {
-      throw MakeTypeError(kIllegalInvocation);
-    }
-    return desc;
-  }
-
-  // GetOwnProperty returns an array indexed by the constants
-  // defined in macros.py.
-  // If p is not a property on obj undefined is returned.
-  var props = %GetOwnProperty_Legacy(TO_OBJECT(obj), p);
-
-  return ConvertDescriptorArrayToDescriptor(props);
-}
-
-
 // ES6 7.3.9
 function GetMethod(obj, p) {
   var func = obj[p];
@@ -498,252 +150,6 @@
   throw MakeTypeError(kCalledNonCallable, typeof func);
 }
 
-
-// Harmony proxies.
-function DefineProxyProperty(obj, p, attributes, should_throw) {
-  // TODO(rossberg): adjust once there is a story for symbols vs proxies.
-  if (IS_SYMBOL(p)) return false;
-
-  var handler = %JSProxyGetHandler(obj);
-  var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
-  if (!result) {
-    if (should_throw) {
-      throw MakeTypeError(kIllegalInvocation);
-    } else {
-      return false;
-    }
-  }
-  return true;
-}
-
-
-// ES6 9.1.6 [[DefineOwnProperty]](P, Desc)
-function DefineObjectProperty(obj, p, desc, should_throw) {
-  var current_array = %GetOwnProperty_Legacy(obj, TO_NAME(p));
-  var current = ConvertDescriptorArrayToDescriptor(current_array);
-  var extensible = %object_is_extensible(obj);
-
-  if (IS_UNDEFINED(current) && !extensible) {
-    if (should_throw) {
-      throw MakeTypeError(kDefineDisallowed, p);
-    } else {
-      return false;
-    }
-  }
-
-  if (!IS_UNDEFINED(current)) {
-    if ((IsGenericDescriptor(desc) ||
-         IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
-        (!desc.hasEnumerable() ||
-         %SameValue(desc.isEnumerable(), current.isEnumerable())) &&
-        (!desc.hasConfigurable() ||
-         %SameValue(desc.isConfigurable(), current.isConfigurable())) &&
-        (!desc.hasWritable() ||
-         %SameValue(desc.isWritable(), current.isWritable())) &&
-        (!desc.hasValue() ||
-         %SameValue(desc.getValue(), current.getValue())) &&
-        (!desc.hasGetter() ||
-         %SameValue(desc.getGet(), current.getGet())) &&
-        (!desc.hasSetter() ||
-         %SameValue(desc.getSet(), current.getSet()))) {
-      return true;
-    }
-    if (!current.isConfigurable()) {
-      // Step 7
-      if (desc.isConfigurable() ||
-          (desc.hasEnumerable() &&
-           desc.isEnumerable() != current.isEnumerable())) {
-        if (should_throw) {
-          throw MakeTypeError(kRedefineDisallowed, p);
-        } else {
-          return false;
-        }
-      }
-      // Step 8
-      if (!IsGenericDescriptor(desc)) {
-        // Step 9a
-        if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
-          if (should_throw) {
-            throw MakeTypeError(kRedefineDisallowed, p);
-          } else {
-            return false;
-          }
-        }
-        // Step 10a
-        if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
-          var currentIsWritable = current.isWritable();
-          if (currentIsWritable != desc.isWritable()) {
-            if (!currentIsWritable) {
-              if (should_throw) {
-                throw MakeTypeError(kRedefineDisallowed, p);
-              } else {
-                return false;
-              }
-            }
-          }
-          if (!currentIsWritable && desc.hasValue() &&
-              !%SameValue(desc.getValue(), current.getValue())) {
-            if (should_throw) {
-              throw MakeTypeError(kRedefineDisallowed, p);
-            } else {
-              return false;
-            }
-          }
-        }
-        // Step 11
-        if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
-          if (desc.hasSetter() &&
-              !%SameValue(desc.getSet(), current.getSet())) {
-            if (should_throw) {
-              throw MakeTypeError(kRedefineDisallowed, p);
-            } else {
-              return false;
-            }
-          }
-          if (desc.hasGetter() && !%SameValue(desc.getGet(),current.getGet())) {
-            if (should_throw) {
-              throw MakeTypeError(kRedefineDisallowed, p);
-            } else {
-              return false;
-            }
-          }
-        }
-      }
-    }
-  }
-
-  // Send flags - enumerable and configurable are common - writable is
-  // only send to the data descriptor.
-  // Take special care if enumerable and configurable is not defined on
-  // desc (we need to preserve the existing values from current).
-  var flag = NONE;
-  if (desc.hasEnumerable()) {
-    flag |= desc.isEnumerable() ? 0 : DONT_ENUM;
-  } else if (!IS_UNDEFINED(current)) {
-    flag |= current.isEnumerable() ? 0 : DONT_ENUM;
-  } else {
-    flag |= DONT_ENUM;
-  }
-
-  if (desc.hasConfigurable()) {
-    flag |= desc.isConfigurable() ? 0 : DONT_DELETE;
-  } else if (!IS_UNDEFINED(current)) {
-    flag |= current.isConfigurable() ? 0 : DONT_DELETE;
-  } else
-    flag |= DONT_DELETE;
-
-  if (IsDataDescriptor(desc) ||
-      (IsGenericDescriptor(desc) &&
-       (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
-    // There are 3 cases that lead here:
-    // Step 4a - defining a new data property.
-    // Steps 9b & 12 - replacing an existing accessor property with a data
-    //                 property.
-    // Step 12 - updating an existing data property with a data or generic
-    //           descriptor.
-
-    if (desc.hasWritable()) {
-      flag |= desc.isWritable() ? 0 : READ_ONLY;
-    } else if (!IS_UNDEFINED(current)) {
-      flag |= current.isWritable() ? 0 : READ_ONLY;
-    } else {
-      flag |= READ_ONLY;
-    }
-
-    var value = UNDEFINED;  // Default value is undefined.
-    if (desc.hasValue()) {
-      value = desc.getValue();
-    } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
-      value = current.getValue();
-    }
-
-    %DefineDataPropertyUnchecked(obj, p, value, flag);
-  } else {
-    // There are 3 cases that lead here:
-    // Step 4b - defining a new accessor property.
-    // Steps 9c & 12 - replacing an existing data property with an accessor
-    //                 property.
-    // Step 12 - updating an existing accessor property with an accessor
-    //           descriptor.
-    var getter = null;
-    if (desc.hasGetter()) {
-      getter = desc.getGet();
-    } else if (IsAccessorDescriptor(current) && current.hasGetter()) {
-      getter = current.getGet();
-    }
-    var setter = null;
-    if (desc.hasSetter()) {
-      setter = desc.getSet();
-    } else if (IsAccessorDescriptor(current) && current.hasSetter()) {
-      setter = current.getSet();
-    }
-    %DefineAccessorPropertyUnchecked(obj, p, getter, setter, flag);
-  }
-  return true;
-}
-
-
-// ES5 section 15.4.5.1.
-function DefineArrayProperty(obj, p, desc, should_throw) {
-  // Step 3 - Special handling for array index.
-  if (!IS_SYMBOL(p)) {
-    var index = TO_UINT32(p);
-    var emit_splice = false;
-    if (TO_STRING(index) == p && index != 4294967295) {
-      var length = obj.length;
-      if (index >= length && %IsObserved(obj)) {
-        emit_splice = true;
-        ObserveBeginPerformSplice(obj);
-      }
-
-      var length_desc = GetOwnPropertyJS(obj, "length");
-      if ((index >= length && !length_desc.isWritable()) ||
-          !DefineObjectProperty(obj, p, desc, true)) {
-        if (emit_splice)
-          ObserveEndPerformSplice(obj);
-        if (should_throw) {
-          throw MakeTypeError(kDefineDisallowed, p);
-        } else {
-          return false;
-        }
-      }
-      if (index >= length) {
-        obj.length = index + 1;
-      }
-      if (emit_splice) {
-        ObserveEndPerformSplice(obj);
-        ObserveEnqueueSpliceRecord(obj, length, [], index + 1 - length);
-      }
-      return true;
-    }
-  }
-
-  // Step 5 - Fallback to default implementation.
-  return DefineObjectProperty(obj, p, desc, should_throw);
-}
-
-
-// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
-function DefineOwnProperty(obj, p, desc, should_throw) {
-  if (IS_PROXY(obj)) {
-    // TODO(rossberg): adjust once there is a story for symbols vs proxies.
-    if (IS_SYMBOL(p)) return false;
-
-    var attributes = FromGenericPropertyDescriptor(desc);
-    return DefineProxyProperty(obj, p, attributes, should_throw);
-  } else if (IS_ARRAY(obj)) {
-    return DefineArrayProperty(obj, p, desc, should_throw);
-  } else {
-    return DefineObjectProperty(obj, p, desc, should_throw);
-  }
-}
-
-
-// ES6 section 19.1.2.9
-function ObjectGetPrototypeOf(obj) {
-  return %_GetPrototype(TO_OBJECT(obj));
-}
-
 // ES6 section 19.1.2.18.
 function ObjectSetPrototypeOf(obj, proto) {
   CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
@@ -759,50 +165,9 @@
   return obj;
 }
 
-
-// ES5 section 15.2.3.6.
-function ObjectDefineProperty(obj, p, attributes) {
-  // The new pure-C++ implementation doesn't support O.o.
-  // TODO(jkummerow): Implement missing features and remove fallback path.
-  if (%IsObserved(obj)) {
-    if (!IS_RECEIVER(obj)) {
-      throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
-    }
-    var name = TO_NAME(p);
-    var desc = ToPropertyDescriptor(attributes);
-    DefineOwnProperty(obj, name, desc, true);
-    return obj;
-  }
-  return %ObjectDefineProperty(obj, p, attributes);
-}
-
-
-// ES5 section 15.2.3.7.
-function ObjectDefineProperties(obj, properties) {
-  // The new pure-C++ implementation doesn't support O.o.
-  // TODO(jkummerow): Implement missing features and remove fallback path.
-  if (%IsObserved(obj)) {
-    if (!IS_RECEIVER(obj)) {
-      throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
-    }
-    var props = TO_OBJECT(properties);
-    var names = %GetOwnPropertyKeys(props, PROPERTY_FILTER_ONLY_ENUMERABLE);
-    var descriptors = new InternalArray();
-    for (var i = 0; i < names.length; i++) {
-      descriptors.push(ToPropertyDescriptor(props[names[i]]));
-    }
-    for (var i = 0; i < names.length; i++) {
-      DefineOwnProperty(obj, names[i], descriptors[i], true);
-    }
-    return obj;
-  }
-  return %ObjectDefineProperties(obj, properties);
-}
-
-
 // ES6 B.2.2.1.1
 function ObjectGetProto() {
-  return %_GetPrototype(TO_OBJECT(this));
+  return %object_get_prototype_of(this);
 }
 
 
@@ -842,26 +207,19 @@
   "valueOf", ObjectValueOf,
   "isPrototypeOf", ObjectIsPrototypeOf,
   "propertyIsEnumerable", ObjectPropertyIsEnumerable,
-  "__defineGetter__", ObjectDefineGetter,
-  "__lookupGetter__", ObjectLookupGetter,
-  "__defineSetter__", ObjectDefineSetter,
-  "__lookupSetter__", ObjectLookupSetter
+  // __defineGetter__ is added in bootstrapper.cc.
+  // __lookupGetter__ is added in bootstrapper.cc.
+  // __defineSetter__ is added in bootstrapper.cc.
+  // __lookupSetter__ is added in bootstrapper.cc.
 ]);
-utils.InstallGetterSetter(GlobalObject.prototype, "__proto__", ObjectGetProto,
-                    ObjectSetProto);
+utils.InstallGetterSetter(
+    GlobalObject.prototype, "__proto__", ObjectGetProto, ObjectSetProto);
 
 // Set up non-enumerable functions in the Object object.
 utils.InstallFunctions(GlobalObject, DONT_ENUM, [
-  // assign is added in bootstrapper.cc.
-  // keys is added in bootstrapper.cc.
-  "defineProperty", ObjectDefineProperty,
-  "defineProperties", ObjectDefineProperties,
-  "getPrototypeOf", ObjectGetPrototypeOf,
   "setPrototypeOf", ObjectSetPrototypeOf,
   // getOwnPropertySymbols is added in symbol.js.
-  // is is added in bootstrapper.cc.
-  // deliverChangeRecords, getNotifier, observe and unobserve are added
-  // in object-observe.js.
+  // Others are added in bootstrapper.cc.
 ]);
 
 
@@ -1096,8 +454,6 @@
   to.IsNaN = GlobalIsNaN;
   to.NumberIsNaN = NumberIsNaN;
   to.NumberIsInteger = NumberIsInteger;
-  to.ObjectDefineProperties = ObjectDefineProperties;
-  to.ObjectDefineProperty = ObjectDefineProperty;
   to.ObjectHasOwnProperty = GlobalObject.prototype.hasOwnProperty;
 });
 
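
The deleted PropertyDescriptor/DefineOwnProperty machinery enforced the
ES5 8.12.9 redefinition invariants (the kRedefineDisallowed paths above) in
self-hosted JS; %ObjectDefineProperty now performs the same checks in C++.
A sketch of those invariants from the JS side, illustration only:

  var o = {};
  // Absent attributes default to false, as in the PropertyDescriptor ctor.
  Object.defineProperty(o, "x", { value: 1 });
  // Redefining with SameValue-identical attributes is a no-op...
  Object.defineProperty(o, "x", { value: 1 });
  // ...but changing the value of a non-configurable, non-writable property
  // must throw, matching the kRedefineDisallowed checks.
  var threw = false;
  try {
    Object.defineProperty(o, "x", { value: 2 });
  } catch (e) {
    threw = e instanceof TypeError;
  }
  threw;  // true
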
diff --git a/src/keys.cc b/src/keys.cc
index f8b606c..17270eb 100644
--- a/src/keys.cc
+++ b/src/keys.cc
@@ -4,8 +4,10 @@
 
 #include "src/keys.h"
 
+#include "src/api-arguments.h"
 #include "src/elements.h"
 #include "src/factory.h"
+#include "src/identity-map.h"
 #include "src/isolate-inl.h"
 #include "src/objects-inl.h"
 #include "src/property-descriptor.h"
@@ -20,6 +22,33 @@
   }
 }
 
+namespace {
+
+static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
+  int len = array->length();
+  for (int i = 0; i < len; i++) {
+    Object* e = array->get(i);
+    if (!(e->IsName() || e->IsNumber())) return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+MaybeHandle<FixedArray> KeyAccumulator::GetKeys(
+    Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
+    GetKeysConversion keys_conversion, bool filter_proxy_keys) {
+  USE(ContainsOnlyValidKeys);
+  Isolate* isolate = object->GetIsolate();
+  KeyAccumulator accumulator(isolate, type, filter);
+  accumulator.set_filter_proxy_keys(filter_proxy_keys);
+  MAYBE_RETURN(accumulator.CollectKeys(object, object),
+               MaybeHandle<FixedArray>());
+  Handle<FixedArray> keys = accumulator.GetKeys(keys_conversion);
+  DCHECK(ContainsOnlyValidKeys(keys));
+  return keys;
+}
+
 Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
   if (length_ == 0) {
     return isolate_->factory()->empty_fixed_array();
@@ -111,7 +140,7 @@
     return AddSymbolKey(key);
   }
   if (filter_ & SKIP_STRINGS) return false;
-  // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
+  // Make sure we do not add keys to a proxy-level (see AddKeysFromJSProxy).
   DCHECK_LE(0, level_string_length_);
   // In some cases (e.g. proxies) we might get in String-converted ints which
   // should be added to the elements list instead of the properties. For
@@ -143,7 +172,7 @@
 bool KeyAccumulator::AddKey(uint32_t key) { return AddIntegerKey(key); }
 
 bool KeyAccumulator::AddIntegerKey(uint32_t key) {
-  // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
+  // Make sure we do not add keys to a proxy-level (see AddKeysFromJSProxy).
   // We mark proxy-levels with a negative length
   DCHECK_LE(0, level_string_length_);
   // Binary search over all but the last level. The last one might not be
@@ -209,17 +238,6 @@
   accessor->AddElementsToKeyAccumulator(array_like, this, convert);
 }
 
-void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
-  // Proxies define a complete list of keys with no distinction of
-  // elements and properties, which breaks the normal assumption for the
-  // KeyAccumulator.
-  AddKeys(array_like, PROXY_MAGIC);
-  // Invert the current length to indicate a present proxy, so we can ignore
-  // element keys for this level. Otherwise we would not fully respect the order
-  // given by the proxy.
-  level_string_length_ = -level_string_length_;
-}
-
 MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
                                         Handle<FixedArray> keys,
                                         PropertyFilter filter) {
@@ -250,8 +268,8 @@
 }
 
 // Returns "nothing" in case of exception, "true" on success.
-Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
-                                             Handle<FixedArray> keys) {
+Maybe<bool> KeyAccumulator::AddKeysFromJSProxy(Handle<JSProxy> proxy,
+                                               Handle<FixedArray> keys) {
   if (filter_proxy_keys_) {
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
         isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
@@ -312,6 +330,43 @@
   level_symbol_length_ = 0;
 }
 
+Maybe<bool> KeyAccumulator::CollectKeys(Handle<JSReceiver> receiver,
+                                        Handle<JSReceiver> object) {
+  // Proxies have no hidden prototype and we should not trigger the
+  // [[GetPrototypeOf]] trap on the last iteration when using
+  // AdvanceFollowingProxies.
+  if (type_ == OWN_ONLY && object->IsJSProxy()) {
+    MAYBE_RETURN(CollectOwnJSProxyKeys(receiver, Handle<JSProxy>::cast(object)),
+                 Nothing<bool>());
+    return Just(true);
+  }
+
+  PrototypeIterator::WhereToEnd end = type_ == OWN_ONLY
+                                          ? PrototypeIterator::END_AT_NON_HIDDEN
+                                          : PrototypeIterator::END_AT_NULL;
+  for (PrototypeIterator iter(isolate_, object,
+                              PrototypeIterator::START_AT_RECEIVER, end);
+       !iter.IsAtEnd();) {
+    Handle<JSReceiver> current =
+        PrototypeIterator::GetCurrent<JSReceiver>(iter);
+    Maybe<bool> result = Just(false);  // Dummy initialization.
+    if (current->IsJSProxy()) {
+      result = CollectOwnJSProxyKeys(receiver, Handle<JSProxy>::cast(current));
+    } else {
+      DCHECK(current->IsJSObject());
+      result = CollectOwnKeys(receiver, Handle<JSObject>::cast(current));
+    }
+    MAYBE_RETURN(result, Nothing<bool>());
+    if (!result.FromJust()) break;  // |false| means "stop iterating".
+    // Advance through proxies, ignoring access checks; the ALL_CAN_READ case
+    // on API objects for OWN_ONLY keys is handled in CollectOwnKeys.
+    if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
+      return Nothing<bool>();
+    }
+  }
+  return Just(true);
+}
+
 namespace {
 
 void TrySettingEmptyEnumCache(JSReceiver* object) {
@@ -363,6 +418,89 @@
 }
 
 namespace {
+static Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
+                                             Handle<FixedArray> array,
+                                             int length) {
+  DCHECK_LE(length, array->length());
+  if (array->length() == length) return array;
+  return isolate->factory()->CopyFixedArrayUpTo(array, length);
+}
+
+Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
+                                           Handle<JSObject> object) {
+  Handle<Map> map(object->map());
+  bool cache_enum_length = map->OnlyHasSimpleProperties();
+
+  Handle<DescriptorArray> descs =
+      Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+  int own_property_count = map->EnumLength();
+  // If the enum length of the given map is set to kInvalidEnumCache, this
+  // means that the map itself has never used the present enum cache. The
+  // first step to using the cache is to set the enum length of the map by
+  // counting the number of own descriptors that are ENUMERABLE_STRINGS.
+  if (own_property_count == kInvalidEnumCacheSentinel) {
+    own_property_count =
+        map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+  } else {
+    DCHECK(
+        own_property_count ==
+        map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS));
+  }
+
+  if (descs->HasEnumCache()) {
+    Handle<FixedArray> keys(descs->GetEnumCache(), isolate);
+    // If the cache already holds at least the number of properties required
+    // for the enumeration, we can reuse it. Otherwise the enum cache was
+    // generated for a previous (smaller) version of the descriptor array,
+    // so we regenerate it below.
+    if (own_property_count <= keys->length()) {
+      isolate->counters()->enum_cache_hits()->Increment();
+      if (cache_enum_length) map->SetEnumLength(own_property_count);
+      return ReduceFixedArrayTo(isolate, keys, own_property_count);
+    }
+  }
+
+  if (descs->IsEmpty()) {
+    isolate->counters()->enum_cache_hits()->Increment();
+    if (cache_enum_length) map->SetEnumLength(0);
+    return isolate->factory()->empty_fixed_array();
+  }
+
+  isolate->counters()->enum_cache_misses()->Increment();
+
+  Handle<FixedArray> storage =
+      isolate->factory()->NewFixedArray(own_property_count);
+  Handle<FixedArray> indices =
+      isolate->factory()->NewFixedArray(own_property_count);
+
+  int size = map->NumberOfOwnDescriptors();
+  int index = 0;
+
+  for (int i = 0; i < size; i++) {
+    PropertyDetails details = descs->GetDetails(i);
+    if (details.IsDontEnum()) continue;
+    Object* key = descs->GetKey(i);
+    if (key->IsSymbol()) continue;
+    storage->set(index, key);
+    if (!indices.is_null()) {
+      if (details.type() != DATA) {
+        indices = Handle<FixedArray>();
+      } else {
+        FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+        int load_by_field_index = field_index.GetLoadByFieldIndex();
+        indices->set(index, Smi::FromInt(load_by_field_index));
+      }
+    }
+    index++;
+  }
+  DCHECK(index == storage->length());
+
+  DescriptorArray::SetEnumCache(descs, isolate, storage, indices);
+  if (cache_enum_length) {
+    map->SetEnumLength(own_property_count);
+  }
+  return storage;
+}
 
 template <bool fast_properties>
 Handle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
@@ -371,10 +509,10 @@
   Handle<FixedArray> keys;
   ElementsAccessor* accessor = object->GetElementsAccessor();
   if (fast_properties) {
-    keys = JSObject::GetFastEnumPropertyKeys(isolate, object);
+    keys = GetFastEnumPropertyKeys(isolate, object);
   } else {
     // TODO(cbruni): preallocate big enough array to also hold elements.
-    keys = JSObject::GetEnumPropertyKeys(object);
+    keys = KeyAccumulator::GetEnumPropertyKeys(isolate, object);
   }
   Handle<FixedArray> result =
       accessor->PrependElementIndices(object, keys, convert, ONLY_ENUMERABLE);
@@ -402,7 +540,7 @@
   }
   // We have no elements but possibly enumerable property keys, hence we can
   // directly initialize the enum cache.
-  return JSObject::GetFastEnumPropertyKeys(isolate, object);
+  return GetFastEnumPropertyKeys(isolate, object);
 }
 
 bool OnlyHasSimpleProperties(Map* map) {
@@ -457,9 +595,314 @@
 
 MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
     GetKeysConversion convert) {
-  return JSReceiver::GetKeys(receiver_, type_, ENUMERABLE_STRINGS, KEEP_NUMBERS,
+  return JSReceiver::GetKeys(receiver_, type_, filter_, KEEP_NUMBERS,
                              filter_proxy_keys_);
 }
 
+enum IndexedOrNamed { kIndexed, kNamed };
+
+// Returns |true| on success, |nothing| on exception.
+template <class Callback, IndexedOrNamed type>
+static Maybe<bool> GetKeysFromInterceptor(Handle<JSReceiver> receiver,
+                                          Handle<JSObject> object,
+                                          KeyAccumulator* accumulator) {
+  Isolate* isolate = accumulator->isolate();
+  if (type == kIndexed) {
+    if (!object->HasIndexedInterceptor()) return Just(true);
+  } else {
+    if (!object->HasNamedInterceptor()) return Just(true);
+  }
+  Handle<InterceptorInfo> interceptor(type == kIndexed
+                                          ? object->GetIndexedInterceptor()
+                                          : object->GetNamedInterceptor(),
+                                      isolate);
+  if ((accumulator->filter() & ONLY_ALL_CAN_READ) &&
+      !interceptor->all_can_read()) {
+    return Just(true);
+  }
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *object, Object::DONT_THROW);
+  Handle<JSObject> result;
+  if (!interceptor->enumerator()->IsUndefined()) {
+    Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
+    const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
+                                           : "interceptor-named-enum";
+    LOG(isolate, ApiObjectAccess(log_tag, *object));
+    result = args.Call(enum_fun);
+  }
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+  if (result.is_null()) return Just(true);
+  DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
+  // The accumulator takes care of string/symbol filtering.
+  if (type == kIndexed) {
+    accumulator->AddElementKeysFromInterceptor(result);
+  } else {
+    accumulator->AddKeys(result, DO_NOT_CONVERT);
+  }
+  return Just(true);
+}
+
+void KeyAccumulator::CollectOwnElementIndices(Handle<JSObject> object) {
+  if (filter_ & SKIP_STRINGS) return;
+  ElementsAccessor* accessor = object->GetElementsAccessor();
+  accessor->CollectElementIndices(object, this);
+}
+
+void KeyAccumulator::CollectOwnPropertyNames(Handle<JSObject> object) {
+  if (object->HasFastProperties()) {
+    int real_size = object->map()->NumberOfOwnDescriptors();
+    Handle<DescriptorArray> descs(object->map()->instance_descriptors(),
+                                  isolate_);
+    for (int i = 0; i < real_size; i++) {
+      PropertyDetails details = descs->GetDetails(i);
+      if ((details.attributes() & filter_) != 0) continue;
+      if (filter_ & ONLY_ALL_CAN_READ) {
+        if (details.kind() != kAccessor) continue;
+        Object* accessors = descs->GetValue(i);
+        if (!accessors->IsAccessorInfo()) continue;
+        if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+      }
+      Name* key = descs->GetKey(i);
+      if (key->FilterKey(filter_)) continue;
+      AddKey(key, DO_NOT_CONVERT);
+    }
+  } else if (object->IsJSGlobalObject()) {
+    GlobalDictionary::CollectKeysTo(
+        handle(object->global_dictionary(), isolate_), this, filter_);
+  } else {
+    NameDictionary::CollectKeysTo(
+        handle(object->property_dictionary(), isolate_), this, filter_);
+  }
+}
+
+// Returns |true| on success, |false| if prototype walking should be stopped,
+// |nothing| if an exception was thrown.
+Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
+                                           Handle<JSObject> object) {
+  NextPrototype();
+  // Check access rights if required.
+  if (object->IsAccessCheckNeeded() &&
+      !isolate_->MayAccess(handle(isolate_->context()), object)) {
+    // The cross-origin spec says that [[Enumerate]] shall return an empty
+    // iterator when it doesn't have access...
+    if (type_ == INCLUDE_PROTOS) {
+      return Just(false);
+    }
+    // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
+    DCHECK_EQ(OWN_ONLY, type_);
+    filter_ = static_cast<PropertyFilter>(filter_ | ONLY_ALL_CAN_READ);
+  }
+
+  CollectOwnElementIndices(object);
+
+  // Add the element keys from the interceptor.
+  Maybe<bool> success =
+      GetKeysFromInterceptor<v8::IndexedPropertyEnumeratorCallback, kIndexed>(
+          receiver, object, this);
+  MAYBE_RETURN(success, Nothing<bool>());
+
+  if (filter_ == ENUMERABLE_STRINGS) {
+    Handle<FixedArray> enum_keys =
+        KeyAccumulator::GetEnumPropertyKeys(isolate_, object);
+    AddKeys(enum_keys, DO_NOT_CONVERT);
+  } else {
+    CollectOwnPropertyNames(object);
+  }
+
+  // Add the property keys from the interceptor.
+  success = GetKeysFromInterceptor<v8::GenericNamedPropertyEnumeratorCallback,
+                                   kNamed>(receiver, object, this);
+  MAYBE_RETURN(success, Nothing<bool>());
+  return Just(true);
+}
+
+// static
+Handle<FixedArray> KeyAccumulator::GetEnumPropertyKeys(
+    Isolate* isolate, Handle<JSObject> object) {
+  if (object->HasFastProperties()) {
+    return GetFastEnumPropertyKeys(isolate, object);
+  } else if (object->IsJSGlobalObject()) {
+    Handle<GlobalDictionary> dictionary(object->global_dictionary(), isolate);
+    int length = dictionary->NumberOfEnumElements();
+    if (length == 0) {
+      return isolate->factory()->empty_fixed_array();
+    }
+    Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+    dictionary->CopyEnumKeysTo(*storage);
+    return storage;
+  } else {
+    Handle<NameDictionary> dictionary(object->property_dictionary(), isolate);
+    int length = dictionary->NumberOfEnumElements();
+    if (length == 0) {
+      return isolate->factory()->empty_fixed_array();
+    }
+    Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+    dictionary->CopyEnumKeysTo(*storage);
+    return storage;
+  }
+}
+
+// ES6 9.5.12
+// Returns |true| on success, |nothing| in case of exception.
+Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
+                                                  Handle<JSProxy> proxy) {
+  STACK_CHECK(isolate_, Nothing<bool>());
+  // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+  Handle<Object> handler(proxy->handler(), isolate_);
+  // 2. If handler is null, throw a TypeError exception.
+  // 3. Assert: Type(handler) is Object.
+  if (proxy->IsRevoked()) {
+    isolate_->Throw(*isolate_->factory()->NewTypeError(
+        MessageTemplate::kProxyRevoked, isolate_->factory()->ownKeys_string()));
+    return Nothing<bool>();
+  }
+  // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+  Handle<JSReceiver> target(proxy->target(), isolate_);
+  // 5. Let trap be ? GetMethod(handler, "ownKeys").
+  Handle<Object> trap;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate_, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+                                        isolate_->factory()->ownKeys_string()),
+      Nothing<bool>());
+  // 6. If trap is undefined, then
+  if (trap->IsUndefined()) {
+    // 6a. Return target.[[OwnPropertyKeys]]().
+    return CollectOwnJSProxyTargetKeys(proxy, target);
+  }
+  // 7. Let trapResultArray be Call(trap, handler, «target»).
+  Handle<Object> trap_result_array;
+  Handle<Object> args[] = {target};
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate_, trap_result_array,
+      Execution::Call(isolate_, trap, handler, arraysize(args), args),
+      Nothing<bool>());
+  // 8. Let trapResult be ? CreateListFromArrayLike(trapResultArray,
+  //    «String, Symbol»).
+  Handle<FixedArray> trap_result;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate_, trap_result,
+      Object::CreateListFromArrayLike(isolate_, trap_result_array,
+                                      ElementTypes::kStringAndSymbol),
+      Nothing<bool>());
+  // 9. Let extensibleTarget be ? IsExtensible(target).
+  Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+  MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+  bool extensible_target = maybe_extensible.FromJust();
+  // 10. Let targetKeys be ? target.[[OwnPropertyKeys]]().
+  Handle<FixedArray> target_keys;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, target_keys,
+                                   JSReceiver::OwnPropertyKeys(target),
+                                   Nothing<bool>());
+  // 11. (Assert)
+  // 12. Let targetConfigurableKeys be an empty List.
+  // To save memory, we're re-using target_keys and will modify it in-place.
+  Handle<FixedArray> target_configurable_keys = target_keys;
+  // 13. Let targetNonconfigurableKeys be an empty List.
+  Handle<FixedArray> target_nonconfigurable_keys =
+      isolate_->factory()->NewFixedArray(target_keys->length());
+  int nonconfigurable_keys_length = 0;
+  // 14. Repeat, for each element key of targetKeys:
+  for (int i = 0; i < target_keys->length(); ++i) {
+    // 14a. Let desc be ? target.[[GetOwnProperty]](key).
+    PropertyDescriptor desc;
+    Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+        isolate_, target, handle(target_keys->get(i), isolate_), &desc);
+    MAYBE_RETURN(found, Nothing<bool>());
+    // 14b. If desc is not undefined and desc.[[Configurable]] is false, then
+    if (found.FromJust() && !desc.configurable()) {
+      // 14b i. Append key as an element of targetNonconfigurableKeys.
+      target_nonconfigurable_keys->set(nonconfigurable_keys_length,
+                                       target_keys->get(i));
+      nonconfigurable_keys_length++;
+      // The key was moved, null it out in the original list.
+      target_keys->set(i, Smi::FromInt(0));
+    } else {
+      // 14c. Else,
+      // 14c i. Append key as an element of targetConfigurableKeys.
+      // (No-op, just keep it in |target_keys|.)
+    }
+  }
+  NextPrototype();  // Prepare for accumulating keys.
+  // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
+  //     then:
+  if (extensible_target && nonconfigurable_keys_length == 0) {
+    // 15a. Return trapResult.
+    return AddKeysFromJSProxy(proxy, trap_result);
+  }
+  // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
+  Zone set_zone(isolate_->allocator());
+  const int kPresent = 1;
+  const int kGone = 0;
+  IdentityMap<int> unchecked_result_keys(isolate_->heap(), &set_zone);
+  int unchecked_result_keys_size = 0;
+  for (int i = 0; i < trap_result->length(); ++i) {
+    DCHECK(trap_result->get(i)->IsUniqueName());
+    Object* key = trap_result->get(i);
+    int* entry = unchecked_result_keys.Get(key);
+    if (*entry != kPresent) {
+      *entry = kPresent;
+      unchecked_result_keys_size++;
+    }
+  }
+  // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
+  for (int i = 0; i < nonconfigurable_keys_length; ++i) {
+    Object* key = target_nonconfigurable_keys->get(i);
+    // 17a. If key is not an element of uncheckedResultKeys, throw a
+    //      TypeError exception.
+    int* found = unchecked_result_keys.Find(key);
+    if (found == nullptr || *found == kGone) {
+      isolate_->Throw(*isolate_->factory()->NewTypeError(
+          MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate_)));
+      return Nothing<bool>();
+    }
+    // 17b. Remove key from uncheckedResultKeys.
+    *found = kGone;
+    unchecked_result_keys_size--;
+  }
+  // 18. If extensibleTarget is true, return trapResult.
+  if (extensible_target) {
+    return AddKeysFromJSProxy(proxy, trap_result);
+  }
+  // 19. Repeat, for each key that is an element of targetConfigurableKeys:
+  for (int i = 0; i < target_configurable_keys->length(); ++i) {
+    Object* key = target_configurable_keys->get(i);
+    if (key->IsSmi()) continue;  // Zapped entry, was nonconfigurable.
+    // 19a. If key is not an element of uncheckedResultKeys, throw a
+    //      TypeError exception.
+    int* found = unchecked_result_keys.Find(key);
+    if (found == nullptr || *found == kGone) {
+      isolate_->Throw(*isolate_->factory()->NewTypeError(
+          MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate_)));
+      return Nothing<bool>();
+    }
+    // 19b. Remove key from uncheckedResultKeys.
+    *found = kGone;
+    unchecked_result_keys_size--;
+  }
+  // 20. If uncheckedResultKeys is not empty, throw a TypeError exception.
+  if (unchecked_result_keys_size != 0) {
+    DCHECK_GT(unchecked_result_keys_size, 0);
+    isolate_->Throw(*isolate_->factory()->NewTypeError(
+        MessageTemplate::kProxyOwnKeysNonExtensible));
+    return Nothing<bool>();
+  }
+  // 21. Return trapResult.
+  return AddKeysFromJSProxy(proxy, trap_result);
+}
+
+Maybe<bool> KeyAccumulator::CollectOwnJSProxyTargetKeys(
+    Handle<JSProxy> proxy, Handle<JSReceiver> target) {
+  // TODO(cbruni): avoid creating another KeyAccumulator
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate_, keys, JSReceiver::OwnPropertyKeys(target), Nothing<bool>());
+  NextPrototype();  // Prepare for accumulating keys.
+  bool prev_filter_proxy_keys_ = filter_proxy_keys_;
+  filter_proxy_keys_ = false;
+  Maybe<bool> result = AddKeysFromJSProxy(proxy, keys);
+  filter_proxy_keys_ = prev_filter_proxy_keys_;
+  return result;
+}
+
 }  // namespace internal
 }  // namespace v8
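
CollectOwnJSProxyKeys above transcribes ES6 9.5.12 step by step; the two
TypeErrors it raises are observable from JavaScript. Illustration only:

  var target = {};
  Object.defineProperty(target, "fixed", { value: 1 });  // non-configurable
  var proxy = new Proxy(target, {
    ownKeys: function() { return ["other"]; }  // omits "fixed"
  });
  try {
    Object.keys(proxy);  // step 17 throws: kProxyOwnKeysMissing
  } catch (e) { /* TypeError */ }

  Object.preventExtensions(target);
  proxy = new Proxy(target, {
    ownKeys: function() { return ["fixed", "extra"]; }  // invents "extra"
  });
  try {
    Object.keys(proxy);  // step 20 throws: kProxyOwnKeysNonExtensible
  } catch (e) { /* TypeError */ }
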
diff --git a/src/keys.h b/src/keys.h
index 1fd3fc0..c73f109 100644
--- a/src/keys.h
+++ b/src/keys.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_KEY_ACCUMULATOR_H_
-#define V8_KEY_ACCUMULATOR_H_
+#ifndef V8_KEYS_H_
+#define V8_KEYS_H_
 
 #include "src/isolate.h"
 #include "src/objects.h"
@@ -36,25 +36,48 @@
       : isolate_(isolate), type_(type), filter_(filter) {}
   ~KeyAccumulator();
 
+  static MaybeHandle<FixedArray> GetKeys(Handle<JSReceiver> object,
+                                         KeyCollectionType type,
+                                         PropertyFilter filter,
+                                         GetKeysConversion keys_conversion,
+                                         bool filter_proxy_keys);
+  Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+  Maybe<bool> CollectKeys(Handle<JSReceiver> receiver,
+                          Handle<JSReceiver> object);
+  void CollectOwnElementIndices(Handle<JSObject> object);
+  void CollectOwnPropertyNames(Handle<JSObject> object);
+
+  static Handle<FixedArray> GetEnumPropertyKeys(Isolate* isolate,
+                                                Handle<JSObject> object);
+
   bool AddKey(uint32_t key);
   bool AddKey(Object* key, AddKeyConversion convert);
   bool AddKey(Handle<Object> key, AddKeyConversion convert);
   void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
   void AddKeys(Handle<JSObject> array, AddKeyConversion convert);
-  void AddKeysFromProxy(Handle<JSObject> array);
-  Maybe<bool> AddKeysFromProxy(Handle<JSProxy> proxy, Handle<FixedArray> keys);
   void AddElementKeysFromInterceptor(Handle<JSObject> array);
+
   // Jump to the next level, pushing the current |levelLength_| to
   // |levelLengths_| and adding a new list to |elements_|.
   void NextPrototype();
   // Sort the integer indices in the last list in |elements_|
   void SortCurrentElementsList();
-  Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
   int length() { return length_; }
   Isolate* isolate() { return isolate_; }
+  PropertyFilter filter() { return filter_; }
   void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
 
  private:
+  Maybe<bool> CollectOwnKeys(Handle<JSReceiver> receiver,
+                             Handle<JSObject> object);
+  Maybe<bool> CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
+                                    Handle<JSProxy> proxy);
+  Maybe<bool> CollectOwnJSProxyTargetKeys(Handle<JSProxy> proxy,
+                                          Handle<JSReceiver> target);
+
+  Maybe<bool> AddKeysFromJSProxy(Handle<JSProxy> proxy,
+                                 Handle<FixedArray> keys);
+
   bool AddIntegerKey(uint32_t key);
   bool AddStringKey(Handle<Object> key, AddKeyConversion convert);
   bool AddSymbolKey(Handle<Object> array);
@@ -97,8 +120,6 @@
                      KeyCollectionType type, PropertyFilter filter)
       : isolate_(isolate), receiver_(receiver), type_(type), filter_(filter) {
     Prepare();
-    // TODO(cbruni): pass filter_ directly to the KeyAccumulator.
-    USE(filter_);
   }
 
   bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
@@ -116,9 +137,9 @@
   Handle<JSReceiver> receiver_;
   KeyCollectionType type_;
   PropertyFilter filter_;
+  bool filter_proxy_keys_ = true;
   bool is_receiver_simple_enum_ = false;
   bool has_empty_prototype_ = false;
-  bool filter_proxy_keys_ = true;
 
   DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
 };
@@ -126,4 +147,4 @@
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_KEY_ACCUMULATOR_H_
+#endif  // V8_KEYS_H_
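
GetEnumPropertyKeys and the enum cache behind it serve the ENUMERABLE_STRINGS
filter, i.e. exactly the keys a for-in loop visits. At the JavaScript level
that filter means, illustration only:

  var o = { a: 1 };
  Object.defineProperty(o, "hidden", { value: 2, enumerable: false });
  o[Symbol("sym")] = 3;
  var seen = [];
  for (var k in o) seen.push(k);
  seen;  // ["a"] -- non-enumerable and symbol-keyed properties are skipped
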
diff --git a/src/libplatform/task-queue.h b/src/libplatform/task-queue.h
index efe9e07..24b68da 100644
--- a/src/libplatform/task-queue.h
+++ b/src/libplatform/task-queue.h
@@ -33,8 +33,8 @@
   void Terminate();
 
  private:
-  base::Mutex lock_;
   base::Semaphore process_queue_semaphore_;
+  base::Mutex lock_;
   std::queue<Task*> task_queue_;
   bool terminated_;
 
diff --git a/src/locked-queue-inl.h b/src/locked-queue-inl.h
index 8b3e9d0..eb18f64 100644
--- a/src/locked-queue-inl.h
+++ b/src/locked-queue-inl.h
@@ -5,7 +5,7 @@
 #ifndef V8_LOCKED_QUEUE_INL_
 #define V8_LOCKED_QUEUE_INL_
 
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/locked-queue.h"
 
 namespace v8 {
@@ -15,7 +15,7 @@
 struct LockedQueue<Record>::Node : Malloced {
   Node() : next(nullptr) {}
   Record value;
-  AtomicValue<Node*> next;
+  base::AtomicValue<Node*> next;
 };
 
 
diff --git a/src/log-utils.cc b/src/log-utils.cc
index ff9af68..a83a0ae 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -164,11 +164,7 @@
   }
 }
 
-
-void Log::MessageBuilder::AppendAddress(Address addr) {
-  Append("0x%" V8PRIxPTR, addr);
-}
-
+void Log::MessageBuilder::AppendAddress(Address addr) { Append("%p", addr); }
 
 void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
   DCHECK(symbol);
diff --git a/src/log-utils.h b/src/log-utils.h
index 3e70a96..059e5a5 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -10,6 +10,7 @@
 #include <cstdarg>
 
 #include "src/allocation.h"
+#include "src/base/compiler-specific.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
 
@@ -62,10 +63,10 @@
     ~MessageBuilder() { }
 
     // Append string data to the log message.
-    void Append(const char* format, ...);
+    void PRINTF_FORMAT(2, 3) Append(const char* format, ...);
 
     // Append string data to the log message.
-    void AppendVA(const char* format, va_list args);
+    void PRINTF_FORMAT(2, 0) AppendVA(const char* format, va_list args);
 
     // Append a character to the log message.
     void Append(const char c);
diff --git a/src/log.cc b/src/log.cc
index 93111a2..97acea9 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -180,8 +180,7 @@
 
 void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                       AbstractCode* code,
-                                      SharedFunctionInfo* shared,
-                                      CompilationInfo* info, Name* name) {
+                                      SharedFunctionInfo* shared, Name* name) {
   name_buffer_->Init(tag);
   name_buffer_->AppendBytes(ComputeMarker(shared, code));
   name_buffer_->AppendName(name);
@@ -190,8 +189,7 @@
 
 void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                       AbstractCode* code,
-                                      SharedFunctionInfo* shared,
-                                      CompilationInfo* info, Name* source,
+                                      SharedFunctionInfo* shared, Name* source,
                                       int line, int column) {
   name_buffer_->Init(tag);
   name_buffer_->AppendBytes(ComputeMarker(shared, code));
@@ -284,12 +282,17 @@
     return;
   }
 
-  base::OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
-                   reinterpret_cast<uint64_t>(code->instruction_start()),
+  // Linux perf expects hex literals without a leading 0x, while some
+  // implementations of printf might prepend one when using the %p format
+  // for pointers, leading to wrongly formatted JIT symbol maps.
+  //
+  // Instead, we use the V8PRIxPTR format string and cast the pointer to
+  // uintptr_t, so that we have control over the exact output format.
+  base::OS::FPrint(perf_output_handle_, "%" V8PRIxPTR " %x %.*s\n",
+                   reinterpret_cast<uintptr_t>(code->instruction_start()),
                    code->instruction_size(), length, name);
 }
 
-
 // Low-level logging support.
 #define LL_LOG(Call) if (ll_logger_) ll_logger_->Call;
 
@@ -661,8 +664,9 @@
   std::vector<base::OS::SharedLibraryAddress> addresses =
       base::OS::GetSharedLibraryAddresses();
   for (size_t i = 0; i < addresses.size(); ++i) {
-    LOG(isolate_, SharedLibraryEvent(
-        addresses[i].library_path, addresses[i].start, addresses[i].end));
+    LOG(isolate_,
+        SharedLibraryEvent(addresses[i].library_path, addresses[i].start,
+                           addresses[i].end, addresses[i].aslr_slide));
   }
 
   // Start thread processing the profiler buffer.
@@ -789,7 +793,7 @@
 void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
   if (!log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,%" V8_PTR_PREFIX "d", name, value);
+  msg.Append("%s,%" V8PRIdPTR, name, value);
   msg.WriteToLogFile();
 }
 
@@ -797,7 +801,7 @@
 void Logger::HandleEvent(const char* name, Object** location) {
   if (!log_->IsEnabled() || !FLAG_log_handles) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,0x%" V8PRIxPTR, name, location);
+  msg.Append("%s,%p", name, location);
   msg.WriteToLogFile();
 }
 
@@ -821,14 +825,14 @@
   ApiEvent("api,check-security");
 }
 
-
 void Logger::SharedLibraryEvent(const std::string& library_path,
-                                uintptr_t start,
-                                uintptr_t end) {
+                                uintptr_t start, uintptr_t end,
+                                intptr_t aslr_slide) {
   if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR,
-             library_path.c_str(), start, end);
+  msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR
+             ",%" V8PRIdPTR,
+             library_path.c_str(), start, end, aslr_slide);
   msg.WriteToLogFile();
 }
 
@@ -838,7 +842,7 @@
   if (!log_->IsEnabled() || !FLAG_log_internal_timer_events) return;
   Log::MessageBuilder msg(log_);
   int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
-  msg.Append("code-deopt,%ld,%d", since_epoch, code->CodeSize());
+  msg.Append("code-deopt,%d,%d", since_epoch, code->CodeSize());
   msg.WriteToLogFile();
 }
 
@@ -848,7 +852,7 @@
   DCHECK(FLAG_log_timer_events || FLAG_prof_cpp);
   Log::MessageBuilder msg(log_);
   int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
-  msg.Append("current-time,%ld", since_epoch);
+  msg.Append("current-time,%d", since_epoch);
   msg.WriteToLogFile();
 }
 
@@ -1008,8 +1012,7 @@
 void Logger::NewEvent(const char* name, void* object, size_t size) {
   if (!log_->IsEnabled() || !FLAG_log) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("new,%s,0x%" V8PRIxPTR ",%u", name, object,
-             static_cast<unsigned int>(size));
+  msg.Append("new,%s,%p,%u", name, object, static_cast<unsigned int>(size));
   msg.WriteToLogFile();
 }
 
@@ -1017,7 +1020,7 @@
 void Logger::DeleteEvent(const char* name, void* object) {
   if (!log_->IsEnabled() || !FLAG_log) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("delete,%s,0x%" V8PRIxPTR, name, object);
+  msg.Append("delete,%s,%p", name, object);
   msg.WriteToLogFile();
 }
 
@@ -1037,12 +1040,12 @@
   } else {
     Symbol* symbol = Symbol::cast(name);
     if (symbol->name()->IsUndefined()) {
-      msg.Append(",1,symbol(hash %x)", prefix, symbol->Hash());
+      msg.Append(",1,symbol(hash %x)", symbol->Hash());
     } else {
       base::SmartArrayPointer<char> str =
           String::cast(symbol->name())
               ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-      msg.Append(",1,symbol(\"%s\" hash %x)", prefix, str.get(),
+      msg.Append(",1,symbol(\"%s%s\" hash %x)", prefix, str.get(),
                  symbol->Hash());
     }
   }
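
The symbol-logging fixes above (a leftover prefix argument with no matching specifier, and a missing %s) are exactly the bug class that the new PRINTF_FORMAT annotation turns into compile-time -Wformat diagnostics, as are the %ld-versus-int fixes earlier in this file. A sketch reusing the macro sketch from log-utils.h (hypothetical names):

#include <cstdint>

struct MsgSketch {
  void PRINTF_FORMAT(2, 3) Append(const char* format, ...);
};

void Demo(MsgSketch& msg, const char* prefix, const char* str, uint32_t hash,
          int since_epoch) {
  msg.Append("current-time,%d", since_epoch);  // ok: %d matches int
  // msg.Append("current-time,%ld", since_epoch);     // -Wformat: %ld vs int
  // msg.Append(",1,symbol(hash %x)", prefix, hash);  // -Wformat: excess arg
  msg.Append(",1,symbol(\"%s%s\" hash %x)", prefix, str, hash);  // ok: 3 of 3
}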
@@ -1114,12 +1117,11 @@
 }
 
 void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                             SharedFunctionInfo* shared, CompilationInfo* info,
-                             Name* name) {
-  PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, name));
+                             SharedFunctionInfo* shared, Name* name) {
+  PROFILER_LOG(CodeCreateEvent(tag, code, shared, name));
 
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
+  CALL_LISTENERS(CodeCreateEvent(tag, code, shared, name));
 
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   if (code == AbstractCode::cast(
@@ -1147,13 +1149,12 @@
 // the SharedFunctionInfo object, we left it to the caller
 // to leave logging functions free from heap allocations.
 void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                             SharedFunctionInfo* shared, CompilationInfo* info,
-                             Name* source, int line, int column) {
-  PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column));
+                             SharedFunctionInfo* shared, Name* source, int line,
+                             int column) {
+  PROFILER_LOG(CodeCreateEvent(tag, code, shared, source, line, column));
 
   if (!is_logging_code_events()) return;
-  CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line,
-                                 column));
+  CALL_LISTENERS(CodeCreateEvent(tag, code, shared, source, line, column));
 
   if (!FLAG_log_code || !log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
@@ -1386,7 +1387,7 @@
   Log::MessageBuilder msg(log_);
   msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
   msg.AppendAddress(sample->pc);
-  msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
+  msg.Append(",%d", static_cast<int>(timer_.Elapsed().InMicroseconds()));
   if (sample->has_external_callback) {
     msg.Append(",1,");
     msg.AppendAddress(sample->external_callback_entry);
@@ -1577,12 +1578,15 @@
 void Logger::LogBytecodeHandlers() {
   if (!FLAG_ignition) return;
 
-  interpreter::Interpreter* interpreter = isolate_->interpreter();
+  const interpreter::OperandScale kOperandScales[] = {
+#define VALUE(Name, _) interpreter::OperandScale::k##Name,
+      OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+  };
+
   const int last_index = static_cast<int>(interpreter::Bytecode::kLast);
-  for (auto operand_scale = interpreter::OperandScale::kSingle;
-       operand_scale <= interpreter::OperandScale::kMaxValid;
-       operand_scale =
-           interpreter::Bytecodes::NextOperandScale(operand_scale)) {
+  interpreter::Interpreter* interpreter = isolate_->interpreter();
+  for (auto operand_scale : kOperandScales) {
     for (int index = 0; index <= last_index; ++index) {
       interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
       if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
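
For readability, the array built above expands (assuming OPERAND_SCALE_LIST defines Single, Double and Quadruple, as it does in this version of the interpreter) to:

const interpreter::OperandScale kOperandScales[] = {
    interpreter::OperandScale::kSingle,
    interpreter::OperandScale::kDouble,
    interpreter::OperandScale::kQuadruple,
};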
@@ -1610,21 +1614,19 @@
         PROFILE(isolate_,
                 CodeCreateEvent(
                     Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-                    *code, *shared, NULL,
-                    *script_name, line_num, column_num));
+                    *code, *shared, *script_name, line_num, column_num));
       } else {
         // Can't distinguish eval and script here, so always use Script.
-        PROFILE(isolate_,
-                CodeCreateEvent(
-                    Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-                    *code, *shared, NULL, *script_name));
+        PROFILE(isolate_, CodeCreateEvent(Logger::ToNativeByScript(
+                                              Logger::SCRIPT_TAG, *script),
+                                          *code, *shared, *script_name));
       }
     } else {
       PROFILE(isolate_,
               CodeCreateEvent(
                   Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-                  *code, *shared, NULL,
-                  isolate_->heap()->empty_string(), line_num, column_num));
+                  *code, *shared, isolate_->heap()->empty_string(), line_num,
+                  column_num));
     }
   } else if (shared->IsApiFunction()) {
     // API function.
@@ -1640,9 +1642,8 @@
       PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
     }
   } else {
-    PROFILE(isolate_,
-            CodeCreateEvent(
-                Logger::LAZY_COMPILE_TAG, *code, *shared, NULL, *func_name));
+    PROFILE(isolate_, CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *shared,
+                                      *func_name));
   }
 }
 
diff --git a/src/log.h b/src/log.h
index fdc5047..9953b4c 100644
--- a/src/log.h
+++ b/src/log.h
@@ -8,6 +8,7 @@
 #include <string>
 
 #include "src/allocation.h"
+#include "src/base/compiler-specific.h"
 #include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/platform.h"
 #include "src/objects.h"
@@ -56,7 +57,6 @@
 
 // Forward declarations.
 class CodeEventListener;
-class CompilationInfo;
 class CpuProfiler;
 class Isolate;
 class Log;
@@ -99,12 +99,9 @@
   V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic")                             \
   V(CALL_MISS_TAG, "CallMiss")                                           \
   V(CALL_NORMAL_TAG, "CallNormal")                                       \
-  V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic")                      \
   V(LOAD_INITIALIZE_TAG, "LoadInitialize")                               \
-  V(LOAD_PREMONOMORPHIC_TAG, "LoadPreMonomorphic")                       \
   V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic")                             \
   V(STORE_INITIALIZE_TAG, "StoreInitialize")                             \
-  V(STORE_PREMONOMORPHIC_TAG, "StorePreMonomorphic")                     \
   V(STORE_GENERIC_TAG, "StoreGeneric")                                   \
   V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic")                           \
   V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak")                   \
@@ -113,7 +110,6 @@
   V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic")                  \
   V(KEYED_CALL_MISS_TAG, "KeyedCallMiss")                                \
   V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal")                            \
-  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")           \
   V(CALLBACK_TAG, "Callback")                                            \
   V(EVAL_TAG, "Eval")                                                    \
   V(FUNCTION_TAG, "Function")                                            \
@@ -227,11 +223,10 @@
                        const char* source);
   void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name);
   void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, CompilationInfo* info,
-                       Name* name);
+                       SharedFunctionInfo* shared, Name* name);
   void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, CompilationInfo* info,
-                       Name* source, int line, int column);
+                       SharedFunctionInfo* shared, Name* source, int line,
+                       int column);
   void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                        int args_count);
   // Emits a code deoptimization event.
@@ -274,9 +269,8 @@
   void HeapSampleStats(const char* space, const char* kind,
                        intptr_t capacity, intptr_t used);
 
-  void SharedLibraryEvent(const std::string& library_path,
-                          uintptr_t start,
-                          uintptr_t end);
+  void SharedLibraryEvent(const std::string& library_path, uintptr_t start,
+                          uintptr_t end, intptr_t aslr_slide);
 
   void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
   void CurrentTimeEvent();
@@ -358,7 +352,7 @@
   // Emits a profiler tick event. Used by the profiler thread.
   void TickEvent(TickSample* sample, bool overflow);
 
-  void ApiEvent(const char* name, ...);
+  PRINTF_FORMAT(2, 3) void ApiEvent(const char* format, ...);
 
   // Logs a StringEvent regardless of whether FLAG_log is true.
   void UncheckedStringEvent(const char* name, const char* value);
@@ -473,12 +467,10 @@
   virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                                Name* name) = 0;
   virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                               SharedFunctionInfo* shared,
-                               CompilationInfo* info, Name* name) = 0;
+                               SharedFunctionInfo* shared, Name* name) = 0;
   virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                               SharedFunctionInfo* shared,
-                               CompilationInfo* info, Name* source, int line,
-                               int column) = 0;
+                               SharedFunctionInfo* shared, Name* source,
+                               int line, int column) = 0;
   virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                                int args_count) = 0;
   virtual void CallbackEvent(Name* name, Address entry_point) = 0;
@@ -505,11 +497,10 @@
   void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                        int args_count) override;
   void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, CompilationInfo* info,
-                       Name* name) override;
+                       SharedFunctionInfo* shared, Name* name) override;
   void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, CompilationInfo* info,
-                       Name* source, int line, int column) override;
+                       SharedFunctionInfo* shared, Name* source, int line,
+                       int column) override;
   void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
 
   void CallbackEvent(Name* name, Address entry_point) override {}
diff --git a/src/lookup.cc b/src/lookup.cc
index 3df8752..bdb9f0a 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -158,47 +158,42 @@
   DCHECK(IsFound() || !holder_->HasFastProperties());
 }
 
-bool LookupIterator::HolderIsInContextIndex(uint32_t index) const {
-  DisallowHeapAllocation no_gc;
-
-  Object* context = heap()->native_contexts_list();
-  while (!context->IsUndefined()) {
-    Context* current_context = Context::cast(context);
-    if (current_context->get(index) == *holder_) {
-      return true;
-    }
-    context = current_context->get(Context::NEXT_CONTEXT_LINK);
-  }
-  return false;
-}
-
 void LookupIterator::InternalUpdateProtector() {
   if (isolate_->bootstrapper()->IsActive()) return;
-  if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
 
   if (*name_ == heap()->constructor_string()) {
+    if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
     // Setting the constructor property could change an instance's @@species
     if (holder_->IsJSArray()) {
       isolate_->CountUsage(
           v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
       isolate_->InvalidateArraySpeciesProtector();
     } else if (holder_->map()->is_prototype_map()) {
+      DisallowHeapAllocation no_gc;
       // Setting the constructor of Array.prototype of any realm also needs
       // to invalidate the species protector
-      if (HolderIsInContextIndex(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+      if (isolate_->IsInAnyContext(*holder_,
+                                   Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
         isolate_->CountUsage(v8::Isolate::UseCounterFeature::
                                  kArrayPrototypeConstructorModified);
         isolate_->InvalidateArraySpeciesProtector();
       }
     }
   } else if (*name_ == heap()->species_symbol()) {
+    if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
     // Setting the Symbol.species property of any Array constructor invalidates
     // the species protector
-    if (HolderIsInContextIndex(Context::ARRAY_FUNCTION_INDEX)) {
+    if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
       isolate_->CountUsage(
           v8::Isolate::UseCounterFeature::kArraySpeciesModified);
       isolate_->InvalidateArraySpeciesProtector();
     }
+  } else if (*name_ == heap()->is_concat_spreadable_symbol()) {
+    if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
+    isolate_->InvalidateIsConcatSpreadableProtector();
+  } else if (*name_ == heap()->has_instance_symbol()) {
+    if (!isolate_->IsHasInstanceLookupChainIntact()) return;
+    isolate_->InvalidateHasInstanceProtector();
   }
 }
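
Note why the IsArraySpeciesLookupChainIntact() check moved from function entry into the individual branches: InternalUpdateProtector now services three independent protectors (array species, is_concat_spreadable, has_instance), so an early return keyed on the species protector alone would wrongly skip the other two. A minimal sketch of the per-branch idiom, using the Isolate methods seen above in a hypothetical helper:

void MaybeInvalidateHasInstance(Isolate* isolate) {
  // Each protector is checked and invalidated independently of the others.
  if (!isolate->IsHasInstanceLookupChainIntact()) return;  // already invalid
  isolate->InvalidateHasInstanceProtector();
}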
 
@@ -352,9 +347,14 @@
     ElementsAccessor* accessor = object->GetElementsAccessor();
     accessor->Delete(object, number_);
   } else {
-    PropertyNormalizationMode mode = holder->map()->is_prototype_map()
-                                         ? KEEP_INOBJECT_PROPERTIES
-                                         : CLEAR_INOBJECT_PROPERTIES;
+    bool is_prototype_map = holder->map()->is_prototype_map();
+    RuntimeCallTimerScope stats_scope(
+        isolate_, is_prototype_map
+                      ? &RuntimeCallStats::PrototypeObject_DeleteProperty
+                      : &RuntimeCallStats::Object_DeleteProperty);
+
+    PropertyNormalizationMode mode =
+        is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
 
     if (holder->HasFastProperties()) {
       JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
@@ -370,11 +370,10 @@
   state_ = NOT_FOUND;
 }
 
-
 void LookupIterator::TransitionToAccessorProperty(
-    AccessorComponent component, Handle<Object> accessor,
+    Handle<Object> getter, Handle<Object> setter,
     PropertyAttributes attributes) {
-  DCHECK(!accessor->IsNull());
+  DCHECK(!getter->IsNull() || !setter->IsNull());
   // Can only be called when the receiver is a JSObject. JSProxy has to be
   // handled via a trap. Adding properties to primitive values is not
   // observable.
@@ -393,7 +392,7 @@
         IsFound() ? static_cast<int>(number_) : DescriptorArray::kNotFound;
 
     Handle<Map> new_map = Map::TransitionToAccessorProperty(
-        old_map, name_, descriptor, component, accessor, attributes);
+        isolate_, old_map, name_, descriptor, getter, setter, attributes);
     bool simple_transition = new_map->GetBackPointer() == receiver->map();
     JSObject::MigrateToMap(receiver, new_map);
 
@@ -413,15 +412,18 @@
   if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
     pair = Handle<AccessorPair>::cast(GetAccessors());
     // If the components and attributes are identical, nothing has to be done.
-    if (pair->get(component) == *accessor) {
-      if (property_details().attributes() == attributes) return;
+    if (pair->Equals(*getter, *setter)) {
+      if (property_details().attributes() == attributes) {
+        if (!IsElement()) JSObject::ReoptimizeIfPrototype(receiver);
+        return;
+      }
     } else {
       pair = AccessorPair::Copy(pair);
-      pair->set(component, *accessor);
+      pair->SetComponents(*getter, *setter);
     }
   } else {
     pair = factory()->NewAccessorPair();
-    pair->set(component, *accessor);
+    pair->SetComponents(*getter, *setter);
   }
 
   TransitionToAccessorPair(pair, attributes);
@@ -638,17 +640,7 @@
 JSReceiver* LookupIterator::NextHolder(Map* map) {
   DisallowHeapAllocation no_gc;
   if (map->prototype() == heap()->null_value()) return NULL;
-
-  DCHECK(!map->IsJSGlobalProxyMap() || map->has_hidden_prototype());
-
-  if (!check_prototype_chain() &&
-      !(check_hidden() && map->has_hidden_prototype()) &&
-      // Always lookup behind the JSGlobalProxy into the JSGlobalObject, even
-      // when not checking other hidden prototypes.
-      !map->IsJSGlobalProxyMap()) {
-    return NULL;
-  }
-
+  if (!check_prototype_chain() && !map->has_hidden_prototype()) return NULL;
   return JSReceiver::cast(map->prototype());
 }
 
diff --git a/src/lookup.h b/src/lookup.h
index 8005f32..bb65639 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -16,17 +16,14 @@
  public:
   enum Configuration {
     // Configuration bits.
-    kHidden = 1 << 0,
-    kInterceptor = 1 << 1,
-    kPrototypeChain = 1 << 2,
+    kInterceptor = 1 << 0,
+    kPrototypeChain = 1 << 1,
 
     // Convenience combinations of bits.
     OWN_SKIP_INTERCEPTOR = 0,
     OWN = kInterceptor,
-    HIDDEN_SKIP_INTERCEPTOR = kHidden,
-    HIDDEN = kHidden | kInterceptor,
-    PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kHidden | kPrototypeChain,
-    PROTOTYPE_CHAIN = kHidden | kPrototypeChain | kInterceptor,
+    PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kPrototypeChain,
+    PROTOTYPE_CHAIN = kPrototypeChain | kInterceptor,
     DEFAULT = PROTOTYPE_CHAIN
   };
 
@@ -229,8 +226,8 @@
   void ReconfigureDataProperty(Handle<Object> value,
                                PropertyAttributes attributes);
   void Delete();
-  void TransitionToAccessorProperty(AccessorComponent component,
-                                    Handle<Object> accessor,
+  void TransitionToAccessorProperty(Handle<Object> getter,
+                                    Handle<Object> setter,
                                     PropertyAttributes attributes);
   void TransitionToAccessorPair(Handle<Object> pair,
                                 PropertyAttributes attributes);
@@ -263,9 +260,12 @@
   Handle<Object> GetDataValue() const;
   void WriteDataValue(Handle<Object> value);
   inline void UpdateProtector() {
-    if (FLAG_harmony_species && !IsElement() &&
-        (*name_ == heap()->constructor_string() ||
-         *name_ == heap()->species_symbol())) {
+    if (IsElement()) return;
+    if (*name_ == heap()->is_concat_spreadable_symbol() ||
+        (FLAG_harmony_species && (*name_ == heap()->constructor_string() ||
+                                  *name_ == heap()->species_symbol())) ||
+        (FLAG_harmony_instanceof &&
+         (*name_ == heap()->has_instance_symbol()))) {
       InternalUpdateProtector();
     }
   }
@@ -315,7 +315,6 @@
                       : holder->GetNamedInterceptor();
   }
 
-  bool check_hidden() const { return (configuration_ & kHidden) != 0; }
   bool check_interceptor() const {
     return (configuration_ & kInterceptor) != 0;
   }
@@ -334,12 +333,7 @@
 
   static Configuration ComputeConfiguration(
       Configuration configuration, Handle<Name> name) {
-    if (name->IsPrivate()) {
-      return static_cast<Configuration>(configuration &
-                                        HIDDEN_SKIP_INTERCEPTOR);
-    } else {
-      return configuration;
-    }
+    return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
   }
 
   static Handle<JSReceiver> GetRootForNonJSReceiver(
@@ -353,8 +347,6 @@
 
   State NotFound(JSReceiver* const holder) const;
 
-  bool HolderIsInContextIndex(uint32_t index) const;
-
   // If configuration_ becomes mutable, update
   // HolderIsReceiverOrHiddenPrototype.
   const Configuration configuration_;
diff --git a/src/machine-type.h b/src/machine-type.h
index 1085657..0878610 100644
--- a/src/machine-type.h
+++ b/src/machine-type.h
@@ -177,7 +177,8 @@
 
 inline bool IsFloatingPoint(MachineRepresentation rep) {
   return rep == MachineRepresentation::kFloat32 ||
-         rep == MachineRepresentation::kFloat64;
+         rep == MachineRepresentation::kFloat64 ||
+         rep == MachineRepresentation::kSimd128;
 }
 
 // Gets the log2 of the element size in bytes of the machine type.
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 6338b2c..b683045 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -19,21 +19,22 @@
 enum AllocationFlags {
   // No special flags.
   NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
   // The result register already contains the allocation top in new
   // space.
-  RESULT_CONTAINS_TOP = 1 << 1,
+  RESULT_CONTAINS_TOP = 1 << 0,
   // Specify that the requested size of the space to allocate is specified in
   // words instead of bytes.
-  SIZE_IN_WORDS = 1 << 2,
+  SIZE_IN_WORDS = 1 << 1,
   // Align the allocation to a multiple of kDoubleSize
-  DOUBLE_ALIGNMENT = 1 << 3,
+  DOUBLE_ALIGNMENT = 1 << 2,
   // Directly allocate in old space
-  PRETENURE = 1 << 4,
+  PRETENURE = 1 << 3,
+  // Allocation folding dominator
+  ALLOCATION_FOLDING_DOMINATOR = 1 << 4,
+  // Folded allocation
+  ALLOCATION_FOLDED = 1 << 5
 };
 
-
 #if V8_TARGET_ARCH_IA32
 #include "src/ia32/assembler-ia32.h"
 #include "src/ia32/assembler-ia32-inl.h"
diff --git a/src/messages.cc b/src/messages.cc
index 67ab36f..6e7c495 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -7,11 +7,23 @@
 #include "src/api.h"
 #include "src/execution.h"
 #include "src/isolate-inl.h"
+#include "src/keys.h"
 #include "src/string-builder.h"
+#include "src/wasm/wasm-module.h"
 
 namespace v8 {
 namespace internal {
 
+MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
+                                 int end_pos)
+    : script_(script), start_pos_(start_pos), end_pos_(end_pos) {}
+MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
+                                 int end_pos, Handle<JSFunction> function)
+    : script_(script),
+      start_pos_(start_pos),
+      end_pos_(end_pos),
+      function_(function) {}
+MessageLocation::MessageLocation() : start_pos_(-1), end_pos_(-1) {}
 
 // If no message listeners have been registered, this one is called
 // by default.
@@ -158,11 +170,25 @@
     : isolate_(isolate) {
   Handle<Object> maybe_function = JSObject::GetDataProperty(
       call_site_obj, isolate->factory()->call_site_function_symbol());
-  if (!maybe_function->IsJSFunction()) return;
+  if (maybe_function->IsJSFunction()) {
+    // javascript
+    fun_ = Handle<JSFunction>::cast(maybe_function);
+    receiver_ = JSObject::GetDataProperty(
+        call_site_obj, isolate->factory()->call_site_receiver_symbol());
+  } else {
+    Handle<Object> maybe_wasm_func_index = JSObject::GetDataProperty(
+        call_site_obj, isolate->factory()->call_site_wasm_func_index_symbol());
+    if (!maybe_wasm_func_index->IsSmi()) {
+      // invalid: neither javascript nor wasm
+      return;
+    }
+    // wasm
+    wasm_obj_ = Handle<JSObject>::cast(JSObject::GetDataProperty(
+        call_site_obj, isolate->factory()->call_site_wasm_obj_symbol()));
+    wasm_func_index_ = Smi::cast(*maybe_wasm_func_index)->value();
+    DCHECK(static_cast<int>(wasm_func_index_) >= 0);
+  }
 
-  fun_ = Handle<JSFunction>::cast(maybe_function);
-  receiver_ = JSObject::GetDataProperty(
-      call_site_obj, isolate->factory()->call_site_receiver_symbol());
   CHECK(JSObject::GetDataProperty(
             call_site_obj, isolate->factory()->call_site_position_symbol())
             ->ToInt32(&pos_));
@@ -170,15 +196,22 @@
 
 
 Handle<Object> CallSite::GetFileName() {
-  Handle<Object> script(fun_->shared()->script(), isolate_);
-  if (script->IsScript()) {
-    return Handle<Object>(Handle<Script>::cast(script)->name(), isolate_);
-  }
-  return isolate_->factory()->null_value();
+  if (!IsJavaScript()) return isolate_->factory()->null_value();
+  Object* script = fun_->shared()->script();
+  if (!script->IsScript()) return isolate_->factory()->null_value();
+  return Handle<Object>(Script::cast(script)->name(), isolate_);
 }
 
 
 Handle<Object> CallSite::GetFunctionName() {
+  if (IsWasm()) {
+    if (wasm_obj_->IsUndefined()) return isolate_->factory()->null_value();
+    // wasm_obj_ can be a String if we generate WASM code directly in a test
+    // case.
+    if (wasm_obj_->IsString()) return wasm_obj_;
+    return wasm::GetWasmFunctionName(Handle<JSObject>::cast(wasm_obj_),
+                                     wasm_func_index_);
+  }
   Handle<String> result = JSFunction::GetName(fun_);
   if (result->length() != 0) return result;
 
@@ -191,19 +224,16 @@
   return isolate_->factory()->null_value();
 }
 
-
 Handle<Object> CallSite::GetScriptNameOrSourceUrl() {
-  Handle<Object> script_obj(fun_->shared()->script(), isolate_);
-  if (script_obj->IsScript()) {
-    Handle<Script> script = Handle<Script>::cast(script_obj);
-    Object* source_url = script->source_url();
-    if (source_url->IsString()) return Handle<Object>(source_url, isolate_);
-    return Handle<Object>(script->name(), isolate_);
-  }
-  return isolate_->factory()->null_value();
+  if (!IsJavaScript()) return isolate_->factory()->null_value();
+  Object* script_obj = fun_->shared()->script();
+  if (!script_obj->IsScript()) return isolate_->factory()->null_value();
+  Handle<Script> script(Script::cast(script_obj), isolate_);
+  Object* source_url = script->source_url();
+  if (source_url->IsString()) return Handle<Object>(source_url, isolate_);
+  return Handle<Object>(script->name(), isolate_);
 }
 
-
 bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
                      Handle<JSFunction> fun,
                      LookupIterator::Configuration config) {
@@ -223,7 +253,7 @@
 
 
 Handle<Object> CallSite::GetMethodName() {
-  if (receiver_->IsNull() || receiver_->IsUndefined()) {
+  if (!IsJavaScript() || receiver_->IsNull() || receiver_->IsUndefined()) {
     return isolate_->factory()->null_value();
   }
   Handle<JSReceiver> receiver =
@@ -261,7 +291,8 @@
     if (!current->IsJSObject()) break;
     Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
     if (current_obj->IsAccessCheckNeeded()) break;
-    Handle<FixedArray> keys = JSObject::GetEnumPropertyKeys(current_obj);
+    Handle<FixedArray> keys =
+        KeyAccumulator::GetEnumPropertyKeys(isolate_, current_obj);
     for (int i = 0; i < keys->length(); i++) {
       HandleScope inner_scope(isolate_);
       if (!keys->get(i)->IsName()) continue;
@@ -281,7 +312,7 @@
 
 
 int CallSite::GetLineNumber() {
-  if (pos_ >= 0) {
+  if (pos_ >= 0 && IsJavaScript()) {
     Handle<Object> script_obj(fun_->shared()->script(), isolate_);
     if (script_obj->IsScript()) {
       Handle<Script> script = Handle<Script>::cast(script_obj);
@@ -293,7 +324,7 @@
 
 
 int CallSite::GetColumnNumber() {
-  if (pos_ >= 0) {
+  if (pos_ >= 0 && IsJavaScript()) {
     Handle<Object> script_obj(fun_->shared()->script(), isolate_);
     if (script_obj->IsScript()) {
       Handle<Script> script = Handle<Script>::cast(script_obj);
@@ -305,6 +336,7 @@
 
 
 bool CallSite::IsNative() {
+  if (!IsJavaScript()) return false;
   Handle<Object> script(fun_->shared()->script(), isolate_);
   return script->IsScript() &&
          Handle<Script>::cast(script)->type() == Script::TYPE_NATIVE;
@@ -312,12 +344,14 @@
 
 
 bool CallSite::IsToplevel() {
+  if (IsWasm()) return false;
   return receiver_->IsJSGlobalProxy() || receiver_->IsNull() ||
          receiver_->IsUndefined();
 }
 
 
 bool CallSite::IsEval() {
+  if (!IsJavaScript()) return false;
   Handle<Object> script(fun_->shared()->script(), isolate_);
   return script->IsScript() &&
          Handle<Script>::cast(script)->compilation_type() ==
@@ -326,7 +360,7 @@
 
 
 bool CallSite::IsConstructor() {
-  if (!receiver_->IsJSObject()) return false;
+  if (!IsJavaScript() || !receiver_->IsJSObject()) return false;
   Handle<Object> constructor =
       JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
                                   isolate_->factory()->constructor_string());
diff --git a/src/messages.h b/src/messages.h
index 4aa0b73..a9f321e 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -24,13 +24,10 @@
 
 class MessageLocation {
  public:
+  MessageLocation(Handle<Script> script, int start_pos, int end_pos);
   MessageLocation(Handle<Script> script, int start_pos, int end_pos,
-                  Handle<JSFunction> function = Handle<JSFunction>())
-      : script_(script),
-        start_pos_(start_pos),
-        end_pos_(end_pos),
-        function_(function) {}
-  MessageLocation() : start_pos_(-1), end_pos_(-1) { }
+                  Handle<JSFunction> function);
+  MessageLocation();
 
   Handle<Script> script() const { return script_; }
   int start_pos() const { return start_pos_; }
@@ -62,13 +59,16 @@
   bool IsEval();
   bool IsConstructor();
 
-  bool IsValid() { return !fun_.is_null(); }
+  bool IsJavaScript() { return !fun_.is_null(); }
+  bool IsWasm() { return !wasm_obj_.is_null(); }
 
  private:
   Isolate* isolate_;
   Handle<Object> receiver_;
   Handle<JSFunction> fun_;
-  int32_t pos_;
+  int32_t pos_ = -1;
+  Handle<JSObject> wasm_obj_;
+  uint32_t wasm_func_index_ = static_cast<uint32_t>(-1);
 };
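
The new fields make CallSite behave like a tagged union: exactly one of fun_ (JavaScript) or wasm_obj_ (WebAssembly) is set for a valid site, which is what IsJavaScript() and IsWasm() report. A usage sketch (hypothetical function):

void DescribeCallSite(CallSite& site) {
  if (site.IsJavaScript()) {
    // fun_ and receiver_ are valid; the script-based getters apply.
  } else if (site.IsWasm()) {
    // wasm_obj_ and wasm_func_index_ are valid; GetFunctionName() consults
    // wasm::GetWasmFunctionName().
  }  // Neither set: the CallSite constructor rejected its arguments.
}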
 
 #define MESSAGE_TEMPLATES(T)                                                   \
@@ -94,12 +94,10 @@
   T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements")         \
   T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.")    \
   T(CalledNonCallable, "% is not a function")                                  \
-  T(CalledNonCallableInstanceOf,                                               \
-    "Right-hand side of 'instanceof' is not callable")                         \
   T(CalledOnNonObject, "% called on non-object")                               \
   T(CalledOnNullOrUndefined, "% called on null or undefined")                  \
   T(CallSiteExpectsFunction,                                                   \
-    "CallSite expects function as second argument, got %")                     \
+    "CallSite expects function or number as second argument, got %")           \
   T(CallSiteMethod, "CallSite method % expects CallSite as receiver")          \
   T(CannotConvertToPrimitive, "Cannot convert object to primitive value")      \
   T(CannotPreventExt, "Cannot prevent extensions")                             \
@@ -120,6 +118,7 @@
   T(DebuggerType, "Debugger: Parameters have wrong types.")                    \
   T(DeclarationMissingInitializer, "Missing initializer in % declaration")     \
   T(DefineDisallowed, "Cannot define property:%, object is not extensible.")   \
+  T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer")           \
   T(DuplicateTemplateProperty, "Object template has duplicate property '%'")   \
   T(ExtendsValueGenerator,                                                     \
     "Class extends value % may not be a generator function")                   \
@@ -131,8 +130,6 @@
   T(GeneratorRunning, "Generator is already running")                          \
   T(IllegalInvocation, "Illegal invocation")                                   \
   T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %")  \
-  T(InstanceofFunctionExpected,                                                \
-    "Expecting a function in instanceof check, but got %")                     \
   T(InstanceofNonobjectProto,                                                  \
     "Function has non-object prototype '%' in instanceof check")               \
   T(InvalidArgument, "invalid_argument")                                       \
@@ -149,6 +146,8 @@
     "Method invoked on undefined or null value.")                              \
   T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.")    \
   T(NoAccess, "no access")                                                     \
+  T(NonCallableInInstanceOfCheck,                                              \
+    "Right-hand side of 'instanceof' is not callable")                         \
   T(NonCoercible, "Cannot match against 'undefined' or 'null'.")               \
   T(NonExtensibleProto, "% is not extensible")                                 \
   T(NonObjectInInstanceOfCheck,                                                \
@@ -175,19 +174,6 @@
   T(ObjectSetterExpectingFunction,                                             \
     "Object.prototype.__defineSetter__: Expecting function")                   \
   T(ObjectSetterCallable, "Setter must be a function: %")                      \
-  T(ObserveCallbackFrozen,                                                     \
-    "Object.observe cannot deliver to a frozen function object")               \
-  T(ObserveGlobalProxy, "% cannot be called on the global proxy object")       \
-  T(ObserveAccessChecked, "% cannot be called on access-checked objects")      \
-  T(ObserveInvalidAccept,                                                      \
-    "Third argument to Object.observe must be an array of strings.")           \
-  T(ObserveNonFunction, "Object.% cannot deliver to non-function")             \
-  T(ObserveNonObject, "Object.% cannot % non-object")                          \
-  T(ObserveNotifyNonNotifier, "notify called on non-notifier object")          \
-  T(ObservePerformNonFunction, "Cannot perform non-function")                  \
-  T(ObservePerformNonString, "Invalid non-string changeType")                  \
-  T(ObserveTypeNonString,                                                      \
-    "Invalid changeRecord with non-string 'type' property")                    \
   T(OrdinaryFunctionCalledAsConstructor,                                       \
     "Function object that's not a constructor was created with new")           \
   T(PromiseCyclic, "Chaining cycle detected for promise %")                    \
@@ -370,6 +356,7 @@
   T(BadSetterArity, "Setter must have exactly one formal parameter.")          \
   T(ConstructorIsAccessor, "Class constructor may not be an accessor")         \
   T(ConstructorIsGenerator, "Class constructor may not be a generator")        \
+  T(ConstructorIsAsync, "Class constructor may not be an async method")        \
   T(DerivedConstructorReturn,                                                  \
     "Derived constructors may only return object or undefined")                \
   T(DuplicateConstructor, "A class may only have one constructor")             \
@@ -380,6 +367,8 @@
     "% loop variable declaration may not have an initializer.")                \
   T(ForInOfLoopMultiBindings,                                                  \
     "Invalid left-hand side in % loop: Must have a single binding.")           \
+  T(GeneratorInLegacyContext,                                                  \
+    "Generator declarations are not allowed in legacy contexts.")              \
   T(IllegalBreak, "Illegal break statement")                                   \
   T(IllegalContinue, "Illegal continue statement")                             \
   T(IllegalLanguageModeDirective,                                              \
@@ -430,9 +419,6 @@
   T(SloppyFunction,                                                            \
     "In non-strict mode code, functions can only be declared at top level, "   \
     "inside a block, or as the body of an if statement.")                      \
-  T(SloppyLexical,                                                             \
-    "Block-scoped declarations (let, const, function, class) not yet "         \
-    "supported outside strict mode")                                           \
   T(SpeciesNotConstructor,                                                     \
     "object.constructor[Symbol.species] is not a constructor")                 \
   T(StrictDelete, "Delete of an unqualified identifier in strict mode.")       \
@@ -445,6 +431,10 @@
   T(TemplateOctalLiteral,                                                      \
     "Octal literals are not allowed in template strings.")                     \
   T(ThisFormalParameter, "'this' is not a valid formal parameter name")        \
+  T(AwaitBindingIdentifier,                                                    \
+    "'await' is not a valid identifier name in an async function")             \
+  T(AwaitExpressionFormalParameter,                                            \
+    "Illegal await-expression in formal parameters of async function")         \
   T(TooManyArguments,                                                          \
     "Too many arguments in function call (only 65535 allowed)")                \
   T(TooManyParameters,                                                         \
@@ -455,10 +445,19 @@
   T(UnexpectedEOS, "Unexpected end of input")                                  \
   T(UnexpectedFunctionSent,                                                    \
     "function.sent expression is not allowed outside a generator")             \
+  T(UnexpectedInsideTailCall, "Unexpected expression inside tail call")        \
   T(UnexpectedReserved, "Unexpected reserved word")                            \
   T(UnexpectedStrictReserved, "Unexpected strict mode reserved word")          \
   T(UnexpectedSuper, "'super' keyword unexpected here")                        \
+  T(UnexpectedSloppyTailCall,                                                  \
+    "Tail call expressions are not allowed in non-strict mode")                \
   T(UnexpectedNewTarget, "new.target expression is not allowed here")          \
+  T(UnexpectedTailCall, "Tail call expression is not allowed here")            \
+  T(UnexpectedTailCallInCatchBlock,                                            \
+    "Tail call expression in catch block when finally block is also present")  \
+  T(UnexpectedTailCallInForInOf, "Tail call expression in for-in/of body")     \
+  T(UnexpectedTailCallInTryBlock, "Tail call expression in try block")         \
+  T(UnexpectedTailCallOfEval, "Tail call of a direct eval is not allowed")     \
   T(UnexpectedTemplateString, "Unexpected template string")                    \
   T(UnexpectedToken, "Unexpected token %")                                     \
   T(UnexpectedTokenIdentifier, "Unexpected identifier")                        \
@@ -478,7 +477,16 @@
   /* EvalError */                                                              \
   T(CodeGenFromStrings, "%")                                                   \
   /* URIError */                                                               \
-  T(URIMalformed, "URI malformed")
+  T(URIMalformed, "URI malformed")                                             \
+  /* Wasm errors (currently Error) */                                          \
+  T(WasmTrapUnreachable, "unreachable")                                        \
+  T(WasmTrapMemOutOfBounds, "memory access out of bounds")                     \
+  T(WasmTrapDivByZero, "divide by zero")                                       \
+  T(WasmTrapDivUnrepresentable, "divide result unrepresentable")               \
+  T(WasmTrapRemByZero, "remainder by zero")                                    \
+  T(WasmTrapFloatUnrepresentable, "integer result unrepresentable")            \
+  T(WasmTrapFuncInvalid, "invalid function")                                   \
+  T(WasmTrapFuncSigMismatch, "function signature mismatch")
 
 class MessageTemplate {
  public:
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 517d4ad..b463c0b 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -102,11 +102,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Assembler::target_address_at(pc_, host_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
@@ -156,19 +151,6 @@
   }
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
-                                   icache_flush_mode);
-}
-
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
 }
@@ -380,7 +362,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index bfa2328..f95323b 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -189,6 +189,42 @@
   return false;
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_memory_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_memory_reference &&
+           updated_memory_reference < new_base + new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
+        icache_flush_mode);
+  } else {
+    UNREACHABLE();
+  }
+}
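
The relocation arithmetic above, in isolation (hypothetical helper names): a memory reference keeps its offset from the buffer base, while a size reference keeps its delta from the size constant, so a bound such as size - 4 stays correct after the wasm heap is moved or grown:

#include <cstdint>

uintptr_t UpdatedMemoryReference(uintptr_t ref, uintptr_t old_base,
                                 uintptr_t new_base) {
  return new_base + (ref - old_base);  // same offset into the new buffer
}

uint32_t UpdatedSizeReference(uint32_t ref, uint32_t old_size,
                              uint32_t new_size) {
  return new_size + (ref - old_size);  // e.g. old_size - 4 -> new_size - 4
}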
 
 // -----------------------------------------------------------------------------
 // Implementation of Operand and MemOperand.
@@ -1829,11 +1865,17 @@
 
 
 void Assembler::lwl(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+         IsMipsArchVariant(kMips32r2));
   GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::lwr(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+         IsMipsArchVariant(kMips32r2));
   GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
 }
 
@@ -1869,11 +1911,17 @@
 
 
 void Assembler::swl(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+         IsMipsArchVariant(kMips32r2));
   GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::swr(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+         IsMipsArchVariant(kMips32r2));
   GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
 }
 
@@ -2009,6 +2057,10 @@
   emit(instr);
 }
 
+void Assembler::sync() {
+  Instr sync_instr = SPECIAL | SYNC;
+  emit(sync_instr);
+}
 
 // Move from HI/LO register.
 
@@ -2955,6 +3007,7 @@
     data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
   } else {
     data = jump_address(label);
+    unbound_labels_count_++;
     internal_reference_positions_.insert(label->pos());
   }
   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 886ac6c..c595cc9 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -63,6 +63,8 @@
   V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
   V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
 
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
 #define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
   V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
   V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
@@ -154,7 +156,7 @@
 Register ToRegister(int num);
 
 // Coprocessor register.
-struct DoubleRegister {
+struct FPURegister {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
     DOUBLE_REGISTERS(REGISTER_CODE)
@@ -174,19 +176,19 @@
   const char* ToString();
   bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
-  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
-  DoubleRegister low() const {
+  bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
+  FPURegister low() const {
     // Find low reg of a Double-reg pair, which is the reg itself.
     DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
-    DoubleRegister reg;
+    FPURegister reg;
     reg.reg_code = reg_code;
     DCHECK(reg.is_valid());
     return reg;
   }
-  DoubleRegister high() const {
+  FPURegister high() const {
     // Find high reg of a Double-reg pair, which is reg + 1.
     DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
-    DoubleRegister reg;
+    FPURegister reg;
     reg.reg_code = reg_code + 1;
     DCHECK(reg.is_valid());
     return reg;
@@ -201,8 +203,8 @@
     return 1 << reg_code;
   }
 
-  static DoubleRegister from_code(int code) {
-    DoubleRegister r = {code};
+  static FPURegister from_code(int code) {
+    FPURegister r = {code};
     return r;
   }
   void setcode(int f) {
@@ -227,8 +229,12 @@
 // but it is not in common use. Someday we will want to support this in v8.)
 
 // For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef DoubleRegister FPURegister;
-typedef DoubleRegister FloatRegister;
+typedef FPURegister FloatRegister;
+
+typedef FPURegister DoubleRegister;
+
+// TODO(mips) Define SIMD registers.
+typedef FPURegister Simd128Register;
 
 const DoubleRegister no_freg = {-1};
 
@@ -304,9 +310,6 @@
 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
 const FPUControlRegister FCSR = { kFCSRRegister };
 
-// TODO(mips) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
 // -----------------------------------------------------------------------------
 // Machine instruction Operands.
 
@@ -799,6 +802,9 @@
   void teq(Register rs, Register rt, uint16_t code);
   void tne(Register rs, Register rt, uint16_t code);
 
+  // Memory barrier instruction.
+  void sync();
+
   // Move from HI/LO register.
   void mfhi(Register rd);
   void mflo(Register rd);
@@ -1039,8 +1045,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
-
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                        intptr_t pc_delta);
@@ -1158,10 +1163,15 @@
 
   bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
 
+  inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
  protected:
   // Load Scaled Address instruction.
   void lsa(Register rd, Register rt, Register rs, uint8_t sa);
 
+  // Helpers.
+  void LoadRegPlusOffsetToAt(const MemOperand& src);
+
   // Relocation for a type-recording IC has the AST id added to it.  This
   // member variable is a way to pass the information from the call site to
   // the relocation info.
@@ -1359,8 +1369,6 @@
   void GenInstrJump(Opcode opcode,
                      uint32_t address);
 
-  // Helpers.
-  void LoadRegPlusOffsetToAt(const MemOperand& src);
 
   // Labels.
   void print(Label* L);
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 9693a52..76d0640 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -603,16 +603,9 @@
     // a0: number of arguments
     // a1: constructor function
     // a3: new target
-    if (is_api_function) {
-      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(a0);
-      __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -825,6 +818,139 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : the value to pass to the generator
+  //  -- a1 : the JSGeneratorObject to resume
+  //  -- a2 : the resume mode (tagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(a1);
+
+  // Store input value into generator object.
+  __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, v0, a3,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ li(t1, Operand(step_in_enabled));
+  __ lb(t1, MemOperand(t1));
+  __ Branch(&skip_flooding, eq, t1, Operand(zero_reg));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2, t0);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(a1, a2);
+    __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Push receiver.
+  __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+  __ Push(t1);
+
+  // ----------- S t a t e -------------
+  //  -- a1    : the JSGeneratorObject to resume
+  //  -- a2    : the resume mode (tagged)
+  //  -- t0    : generator function
+  //  -- cp    : generator context
+  //  -- ra    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3,
+        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+    __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+  __ GetObjectType(a3, a3, a3);
+  __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+
+  // New-style (ignition/turbofan) generator object.
+  {
+    __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+         FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ SmiUntag(a0);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(a3, a1);
+    __ Move(a1, t0);
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ Jump(a2);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(ra, fp);
+    __ Move(fp, sp);
+    __ Push(cp, t0);
+
+    // Restore the operand stack.
+    __ lw(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+    __ lw(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
+    __ Addu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ Lsa(a3, a0, a3, kPointerSizeLog2 - 1);
+    {
+      Label done_loop, loop;
+      __ bind(&loop);
+      __ Branch(&done_loop, eq, a0, Operand(a3));
+      __ lw(t1, MemOperand(a0));
+      __ Push(t1);
+      __ Branch(USE_DELAY_SLOT, &loop);
+      __ addiu(a0, a0, kPointerSize);  // In delay slot.
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+    __ sw(t1, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+    __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(a2);
+    __ Addu(a3, a3, Operand(a2));
+    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ Move(v0, a1);  // Continuation expects generator object in v0.
+    __ Jump(a3);
+  }
+}
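
For readers unfamiliar with the old-style resume path, the arithmetic above boils down to: entry = tagged Code pointer + Code::kHeaderSize - kHeapObjectTag + untagged continuation offset. A minimal standalone sketch, assuming 32-bit V8 conventions (odd tagged pointers, Smi shift of one) and a made-up header size:

// resume_address_sketch.cc -- illustrative only, not V8 code.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kHeapObjectTag = 1;    // assumption: tagged heap pointers are odd
  const uintptr_t kCodeHeaderSize = 64;  // hypothetical stand-in for Code::kHeaderSize
  uintptr_t tagged_code = 0x10001;       // hypothetical tagged Code pointer
  int32_t smi_continuation = 24 << 1;    // Smi-tagged continuation offset (24)
  // __ Addu(a3, header - tag); __ SmiUntag(a2); __ Addu(a3, a2); __ Jump(a3);
  uintptr_t resume = tagged_code + kCodeHeaderSize - kHeapObjectTag +
                     (smi_continuation >> 1);
  assert(resume == 0x10001 + 64 - 1 + 24);
  return 0;
}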
 
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
@@ -842,14 +968,16 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(a1);
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into kInterpreterBytecodeRegister.
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   Label load_debug_bytecode_array, bytecode_array_loaded;
   Register debug_info = kInterpreterBytecodeArrayRegister;
@@ -861,8 +989,11 @@
         FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ JumpIfRoot(kInterpreterBytecodeArrayRegister,
+                Heap::kUndefinedValueRootIndex, &bytecode_array_not_present);
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
     __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
               Operand(zero_reg));
@@ -871,8 +1002,13 @@
               Operand(BYTECODE_ARRAY_TYPE));
   }
 
-  // Push new.target, bytecode array and zero for bytecode array offset.
-  __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+  // Load initial bytecode offset.
+  __ li(kInterpreterBytecodeOffsetRegister,
+        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
+  __ Push(a3, kInterpreterBytecodeArrayRegister, t0);
 
   // Allocate the local and temporary register file on the stack.
   {
@@ -902,17 +1038,8 @@
     __ Branch(&loop_header, ge, t0, Operand(zero_reg));
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load bytecode offset and dispatch table into registers.
+  // Load accumulator and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ Addu(kInterpreterRegisterFileRegister, fp,
-          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ li(kInterpreterBytecodeOffsetRegister,
-        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ li(kInterpreterDispatchTableRegister,
         Operand(ExternalReference::interpreter_dispatch_table_address(
             masm->isolate())));
@@ -923,42 +1050,41 @@
   __ lbu(a0, MemOperand(a0));
   __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
   __ lw(at, MemOperand(at));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(at);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+  // The return value is in v0.
+
+  // Get the arguments + receiver count.
+  __ lw(t0, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(t0, FieldMemOperand(t0, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments and return.
+  __ Addu(sp, sp, t0);
+  __ Jump(ra);
 
   // Load debug copy of the bytecode array.
   __ bind(&load_debug_bytecode_array);
   __ lw(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ Branch(&bytecode_array_loaded);
-}
 
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in v0.
-
-  // Leave the frame (also dropping the register file).
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  // Drop receiver + arguments and return.
-  __ lw(at, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                            BytecodeArray::kParameterSizeOffset));
-  __ Addu(sp, sp, at);
-  __ Jump(ra);
+  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
+  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(a1, t0, t1);
+  __ Jump(t0);
 }
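
With the exit trampoline gone, the entry trampoline now returns here itself: it reloads the BytecodeArray from the frame, reads the parameter byte count, and pops receiver plus arguments before returning. A hedged sketch of that teardown arithmetic (4-byte pointers and a parameter size that already includes the receiver slot are assumptions matching 32-bit MIPS):

// epilogue_sketch.cc -- illustrative only.
#include <cassert>
#include <cstdint>

int main() {
  const int kPointerSize = 4;  // 32-bit MIPS
  int argc = 2;                // hypothetical argument count
  // BytecodeArray::kParameterSizeOffset: byte count incl. the receiver slot.
  uint32_t parameter_size = (argc + 1) * kPointerSize;
  uintptr_t sp = 0x7fff0000;   // hypothetical stack pointer after LeaveFrame
  sp += parameter_size;        // __ Addu(sp, sp, t0) before __ Jump(ra)
  assert(sp == 0x7fff000c);
  return 0;
}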
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -991,7 +1117,6 @@
           RelocInfo::CODE_TARGET);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -1022,25 +1147,24 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+  __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
+                          Code::kHeaderSize - kHeapObjectTag));
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register and dispatch table register.
-  __ Addu(kInterpreterRegisterFileRegister, fp,
-          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Initialize the dispatch table register.
   __ li(kInterpreterDispatchTableRegister,
         Operand(ExternalReference::interpreter_dispatch_table_address(
             masm->isolate())));
 
-  // Get the context from the frame.
-  __ lw(kContextRegister,
-        MemOperand(kInterpreterRegisterFileRegister,
-                   InterpreterFrameConstants::kContextFromRegisterPointer));
-
   // Get the bytecode array pointer from the frame.
-  __ lw(
-      kInterpreterBytecodeArrayRegister,
-      MemOperand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+  __ lw(kInterpreterBytecodeArrayRegister,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1054,9 +1178,7 @@
 
   // Get the target bytecode offset from the frame.
   __ lw(kInterpreterBytecodeOffsetRegister,
-        MemOperand(
-            kInterpreterRegisterFileRegister,
-            InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
   // Dispatch to the target bytecode.
@@ -1065,63 +1187,164 @@
   __ lbu(a1, MemOperand(a1));
   __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
   __ lw(a1, MemOperand(a1));
-  __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(a1);
 }
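
Note the removed Addu: the dispatch table is now assumed to hold raw code entry addresses, so indexing and jumping suffice. A standalone sketch of that table-indexed dispatch, with plain function pointers standing in for bytecode handler entry points:

// dispatch_sketch.cc -- illustrative only.
#include <cstdint>

using Handler = void (*)();
static void NopHandler() {}

int main() {
  Handler dispatch_table[256] = {};   // hypothetical dispatch table
  uint8_t bytecode_array[] = {7, 0};  // hypothetical bytecode stream
  dispatch_table[7] = NopHandler;
  int offset = 0;
  uint8_t bc = bytecode_array[offset];  // __ lbu(a1, MemOperand(a1))
  Handler target = dispatch_table[bc];  // __ Lsa + __ lw
  target();                             // __ Jump(a1)
  return 0;
}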
 
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(a1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and pop the
-  // accumulator value into the accumulator register.
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register argument_count = a0;
+  Register closure = a1;
+  Register new_target = a3;
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = a0;
+  Register index = a2;
+  __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+
+  // Find literals.
+  // a3  : native context
+  // a2  : length / index
+  // a0  : optimized code map
+  // stack[0] : new target
+  // stack[4] : closure
+  Register native_context = a3;
+  __ lw(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = a1;
+  Register array_pointer = t1;
+
+  // Does the native context match?
+  __ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(array_pointer, map, Operand(at));
+  __ lw(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousContext));
+  __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+  // OSR id set to none?
+  __ lw(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+  // Literals available?
+  __ lw(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ lw(t0, MemOperand(sp, 0));
+  __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+  __ push(index);
+  __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = t0;
+  __ lw(entry,
+        FieldMemOperand(array_pointer,
+                        SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, t1);
+
+  // Link the closure into the optimized function list.
+  // t0 : code entry
+  // a3 : native context
+  // a1 : closure
+  __ lw(t1,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ sw(closure,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mov(t1, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+                            kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ mov(closure, t1);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ Jump(entry);
+
+  __ bind(&loop_bottom);
+  __ Subu(index, index,
+          Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context free optimized code map entry.
+  __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                        SharedFunctionInfo::kSharedCodeIndex));
+  __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ And(t1, t1, Operand(Code::KindField::kMask));
+  __ srl(t1, t1, Code::KindField::kShift);
+  __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
+  // Yes, install the full code.
+  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, t1);
+  __ Jump(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
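
A simplified C++ model of the optimized-code-map walk above may help; the flat entry struct, the -1 sentinel for BailoutId::None() and weak cells modeled as nullable pointers are all assumptions, not the real heap layout:

// compile_lazy_lookup_sketch.cc -- hypothetical model, illustrative only.
enum class Lookup { kFoundCode, kCallRuntime, kTrySharedCode };

struct Entry {            // one kEntryLength-sized map entry, flattened
  const void* context;    // weak: nullptr stands for a cleared cell
  int osr_ast_id;         // -1 stands for BailoutId::None()
  const void* literals;   // weak
  const void* code;       // weak
};

Lookup FindCachedCode(const Entry* entries, int count,
                      const void* native_context, const Entry** out) {
  for (int i = count - 1; i >= 0; --i) {        // loop_top .. loop_bottom
    const Entry& e = entries[i];
    if (e.context != native_context) continue;  // wrong native context
    if (e.osr_ast_id != -1) continue;           // OSR entries don't qualify
    if (e.literals == nullptr) return Lookup::kCallRuntime;  // gotta_call_runtime
    if (e.code == nullptr) return Lookup::kTrySharedCode;    // maybe_call_runtime
    *out = &e;                                  // install entry and tail-call
    return Lookup::kFoundCode;
  }
  return Lookup::kCallRuntime;  // found neither literals nor code
}

int main() {
  static const int ctx = 0;
  Entry entries[] = {{&ctx, -1, &ctx, &ctx}};
  const Entry* found = nullptr;
  return FindCachedCode(entries, 1, &ctx, &found) == Lookup::kFoundCode ? 0 : 1;
}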
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
@@ -1264,15 +1487,17 @@
   __ SmiUntag(t2);
   // Switch on the state.
   Label with_tos_register, unknown_state;
-  __ Branch(&with_tos_register,
-            ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Branch(&with_tos_register, ne, t2,
+            Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
   __ Ret(USE_DELAY_SLOT);
  // Safe to fill delay slot; Addu will emit one instruction.
   __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
 
   __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
   __ lw(v0, MemOperand(sp, 1 * kPointerSize));
-  __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+  __ Branch(&unknown_state, ne, t2,
+            Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
 
   __ Ret(USE_DELAY_SLOT);
  // Safe to fill delay slot; Addu will emit one instruction.
@@ -1478,28 +1703,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : argc
-  //  -- sp[0] : first argument (left-hand side)
-  //  -- sp[4] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ lw(InstanceOfDescriptor::LeftRegister(),
-          MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
-    __ lw(InstanceOfDescriptor::RightRegister(),
-          MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ DropAndRet(2);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0    : argc
@@ -2469,6 +2672,30 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Push(a0);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(a0, a1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
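
Both builtins Smi-tag the untagged byte count before tail-calling into the runtime. A minimal sketch of the 32-bit Smi encoding assumed here (kSmiTag == 0, kSmiTagSize == 1, i.e. a plain shift by one):

// smi_tag_sketch.cc -- illustrative only.
#include <cassert>
#include <cstdint>

static int32_t SmiTag(int32_t value) { return value << 1; }
static int32_t SmiUntag(int32_t smi) { return smi >> 1; }

int main() {
  int32_t requested_size = 64;           // untagged object size in bytes
  int32_t smi = SmiTag(requested_size);  // what __ SmiTag(a0) pushes
  assert(smi == 128 && SmiUntag(smi) == requested_size);
  return 0;
}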
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index fd286fb..4084964 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -54,12 +54,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -72,11 +66,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1465,128 +1454,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = a1;              // Object (lhs).
-  Register const function = a0;            // Function (rhs).
-  Register const object_map = a2;          // Map of {object}.
-  Register const function_map = a3;        // Map of {function}.
-  Register const function_prototype = t0;  // Prototype of {function}.
-  Register const scratch = t1;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
-  __ Branch(&fast_case, ne, function, Operand(at));
-  __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
-  __ Branch(&fast_case, ne, object_map, Operand(at));
-  __ Ret(USE_DELAY_SLOT);
-  __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ GetObjectType(function, function_map, scratch);
-  __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ LoadRoot(v0, Heap::kFalseValueRootIndex);  // In delay slot.
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ GetObjectType(function, function_map, scratch);
-  __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
-  // Go to the runtime if the function is not a constructor.
-  __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
-  __ And(at, scratch, Operand(1 << Map::kIsConstructor));
-  __ Branch(&slow_case, eq, at, Operand(zero_reg));
-
-  // Ensure that {function} has an instance prototype.
-  __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ lw(function_prototype,
-        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  __ GetObjectType(function_prototype, scratch, scratch);
-  __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
-  __ lw(function_prototype,
-        FieldMemOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Register const object_instance_type = function_map;
-  Register const map_bit_field = function_map;
-  Register const null = scratch;
-  Register const result = v0;
-
-  Label done, loop, fast_runtime_fallback;
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ LoadRoot(null, Heap::kNullValueRootIndex);
-  __ bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
-  __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
-  // Check if the current object is a Proxy.
-  __ lbu(object_instance_type,
-         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
-  __ Branch(&fast_runtime_fallback, eq, object_instance_type,
-            Operand(JS_PROXY_TYPE));
-
-  __ lw(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ Branch(&done, eq, object, Operand(function_prototype));
-  __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
-  __ lw(object_map,
-        FieldMemOperand(object, HeapObject::kMapOffset));  // In delay slot.
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-  __ Ret(USE_DELAY_SLOT);
-  __ StoreRoot(result,
-               Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
-
-  // Found Proxy or access check needed: Call the runtime
-  __ bind(&fast_runtime_fallback);
-  __ Push(object, function_prototype);
-  // Invalidate the instanceof cache.
-  DCHECK(Smi::FromInt(0) == 0);
-  __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ Push(object, function);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
@@ -4037,8 +3904,8 @@
   __ bind(&not_array);
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&miss, ne, at, Operand(feedback));
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, feedback,
                                                receiver_map, scratch1, t5);
@@ -4179,8 +4046,8 @@
   __ bind(&not_array);
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&miss, ne, feedback, Operand(at));
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
       scratch1, scratch2);
@@ -4748,15 +4615,15 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ sw(a2, MemOperand(v0, JSObject::kMapOffset));
+  __ sw(a2, FieldMemOperand(v0, JSObject::kMapOffset));
   __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(a3, MemOperand(v0, JSObject::kPropertiesOffset));
-  __ sw(a3, MemOperand(v0, JSObject::kElementsOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ Addu(a1, v0, Operand(JSObject::kHeaderSize));
+  __ Addu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
 
   // ----------- S t a t e -------------
-  //  -- v0 : result (untagged)
+  //  -- v0 : result (tagged)
   //  -- a1 : result fields (untagged)
   //  -- t1 : result end (untagged)
   //  -- a2 : initial map
@@ -4774,11 +4641,7 @@
   {
     // Initialize all in-object fields with undefined.
     __ InitializeFieldsWithFiller(a1, t1, a0);
-
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ Ret(USE_DELAY_SLOT);
-    __ Addu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+    __ Ret();
   }
   __ bind(&slack_tracking);
   {
@@ -4801,9 +4664,7 @@
     Label finalize;
     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
     __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
-    __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ Addu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+    __ Branch(&finalize, eq, a3, Operand(zero_reg));
     __ Ret();
 
     // Finalize the instance size.
@@ -4828,10 +4689,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(a2);
   }
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ Subu(v0, v0, Operand(kHeapObjectTag));
   __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
   __ Lsa(t1, v0, t1, kPointerSizeLog2);
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ Subu(t1, t1, Operand(kHeapObjectTag));
   __ jmp(&done_allocate);
 
   // Fall back to %NewObject.
@@ -4850,19 +4711,19 @@
   // -----------------------------------
   __ AssertFunction(a1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make a2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ Branch(USE_DELAY_SLOT, &loop_entry);
-    __ mov(a2, fp);  // In delay slot.
-    __ bind(&loop);
+  // Make a2 point to the JavaScript frame.
+  __ mov(a2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&loop, ne, a1, Operand(a3));
+    __ Branch(&ok, eq, a1, Operand(a3));
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have rest parameters (only possible if we have an
@@ -4893,7 +4754,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the rest parameter array in v0.
@@ -4935,7 +4796,7 @@
     Label allocate, done_allocate;
     __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
     __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
-    __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+    __ Allocate(a1, v0, a3, t0, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in v0.
@@ -4991,23 +4852,39 @@
   // -----------------------------------
   __ AssertFunction(a1);
 
+  // Make t0 point to the JavaScript frame.
+  __ mov(t0, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ lw(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
+    __ Branch(&ok, eq, a1, Operand(a3));
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ lw(a2,
         FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Lsa(a3, fp, a2, kPointerSizeLog2 - 1);
+  __ Lsa(a3, t0, a2, kPointerSizeLog2 - 1);
   __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
 
   // a1 : function
   // a2 : number of parameters (tagged)
   // a3 : parameters pointer
+  // t0 : JavaScript frame pointer
   // Registers used over whole function:
   //  t1 : arguments count (tagged)
   //  t2 : mapped parameter count (tagged)
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
   __ lw(a0, MemOperand(t0, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&adaptor_frame, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5053,7 +4930,7 @@
   __ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
+  __ Allocate(t5, v0, t5, t0, &runtime, NO_ALLOCATION_FLAGS);
 
   // v0 = address of new object(s) (tagged)
   // a2 = argument count (smi-tagged)
@@ -5205,19 +5082,19 @@
   // -----------------------------------
   __ AssertFunction(a1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make a2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ Branch(USE_DELAY_SLOT, &loop_entry);
-    __ mov(a2, fp);  // In delay slot.
-    __ bind(&loop);
+  // Make a2 point to the JavaScript frame.
+  __ mov(a2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&loop, ne, a1, Operand(a3));
+    __ Branch(&ok, eq, a1, Operand(a3));
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -5255,7 +5132,7 @@
   Label allocate, done_allocate;
   __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
   __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
-  __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+  __ Allocate(a1, v0, a3, t0, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in v0.
@@ -5608,7 +5485,11 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
+
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
 
   // Save context, callee and call data.
   __ Push(context, callee, call_data);
@@ -5632,7 +5513,7 @@
 
   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
-  const int kApiStackSpace = 4;
+  const int kApiStackSpace = 3;
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
@@ -5649,8 +5530,6 @@
   // FunctionCallbackInfo::length_ = argc
   __ li(at, Operand(argc()));
   __ sw(at, MemOperand(a0, 2 * kPointerSize));
-  // FunctionCallbackInfo::is_construct_call_ = 0
-  __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5667,8 +5546,9 @@
   }
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  int32_t stack_space_offset = 4 * kPointerSize;
+  int32_t stack_space_offset = 3 * kPointerSize;
   stack_space = argc() + FCA::kArgsLength + 1;
+  // TODO(adamk): Why are we clobbering this immediately?
   stack_space_offset = kInvalidStackOffset;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                            stack_space_offset, return_value_operand,
@@ -5677,15 +5557,44 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- sp[0]                        : name
-  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- a2                           : api_function_address
-  // -----------------------------------
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
 
-  Register api_function_address = ApiGetterDescriptor::function_address();
-  DCHECK(api_function_address.is(a2));
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = t0;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+  Register api_function_address = a2;
+
+  // Here and below +1 is for name() pushed after the args_ array.
+  typedef PropertyCallbackArguments PCA;
+  __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+  __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+  __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+                                    kPointerSize));
+  __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
+  __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+  __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+  // should_throw_on_error -> false
+  DCHECK(Smi::FromInt(0) == nullptr);
+  __ sw(zero_reg,
+        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5706,6 +5615,10 @@
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+  __ lw(api_function_address,
+        FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
   // +3 is to skip prolog, return address and name handle.
   MemOperand return_value_operand(
       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
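
A hypothetical model of the stack area built above: the args_ slots in PropertyCallbackArguments index order, with the property name one slot below at sp[0]. The enum mirrors the STATIC_ASSERTs; the pointer values are made up:

// pca_layout_sketch.cc -- illustrative only.
#include <cstdint>

enum PcaIndex {
  kShouldThrowOnErrorIndex = 0,
  kHolderIndex = 1,
  kIsolateIndex = 2,
  kReturnValueDefaultValueIndex = 3,
  kReturnValueOffset = 4,
  kDataIndex = 5,
  kThisIndex = 6,
  kArgsLength = 7
};

int main() {
  uintptr_t stack[kArgsLength + 1] = {};  // +1: name() sits below args_
  uintptr_t name = 0x1000, receiver = 0x2000, holder = 0x3000;  // hypothetical
  stack[0] = name;                          // __ sw(scratch, MemOperand(sp, 0))
  stack[kThisIndex + 1] = receiver;
  stack[kHolderIndex + 1] = holder;
  stack[kShouldThrowOnErrorIndex + 1] = 0;  // false, stored via zero_reg
  // data, both return-value slots and the isolate pointer fill in likewise.
  return stack[0] == name ? 0 : 1;
}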
@@ -5714,7 +5627,6 @@
                            return_value_operand, NULL);
 }
 
-
 #undef __
 
 }  // namespace internal
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 1c6c169..63bbda3 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -733,13 +733,13 @@
   __ sll(scratch, length, 2);
   __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
   __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
-  // array: destination FixedDoubleArray, not tagged as heap object
+  // array: destination FixedDoubleArray, tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
   // Update receiver's map.
-  __ sw(scratch2, MemOperand(array, HeapObject::kMapOffset));
+  __ sw(scratch2, FieldMemOperand(array, HeapObject::kMapOffset));
 
   __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ RecordWriteField(receiver,
@@ -751,7 +751,7 @@
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Addu(scratch1, array, Operand(kHeapObjectTag));
+  __ Addu(scratch1, array, Operand(kHeapObjectTag - kHeapObjectTag));
   __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
@@ -766,7 +766,8 @@
   // Prepare for conversion loop.
   __ Addu(scratch1, elements,
       Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ Addu(scratch3, array,
+          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   __ Lsa(array_end, scratch3, length, 2);
 
   // Repurpose registers no longer in use.
@@ -886,8 +887,8 @@
   // array: destination FixedArray, not tagged as heap object
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ sw(scratch, MemOperand(array, HeapObject::kMapOffset));
+  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sw(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
   Register src_elements = elements;
@@ -897,7 +898,8 @@
   __ Addu(src_elements, src_elements, Operand(
         FixedDoubleArray::kHeaderSize - kHeapObjectTag
         + Register::kExponentOffset));
-  __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
+  __ Addu(dst_elements, array,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Lsa(dst_end, dst_elements, dst_end, 1);
 
   // Allocating heap numbers in the loop below can fail and cause a jump to
@@ -912,8 +914,8 @@
   __ bind(&initialization_loop_entry);
   __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
 
-  __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
-  __ Addu(array, array, Operand(kHeapObjectTag));
+  __ Addu(dst_elements, array,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   // Using offsetted addresses.
   // dst_elements: begin of destination FixedArray element fields, not tagged
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index 4914251..f50a849 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -108,6 +108,19 @@
   (CpuFeatures::IsSupported(static_cast<CpuFeature>(check)))
 #endif
 
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kMipsLwrOffset = 0;
+const uint32_t kMipsLwlOffset = 3;
+const uint32_t kMipsSwrOffset = 0;
+const uint32_t kMipsSwlOffset = 3;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kMipsLwrOffset = 3;
+const uint32_t kMipsLwlOffset = 0;
+const uint32_t kMipsSwrOffset = 3;
+const uint32_t kMipsSwlOffset = 0;
+#else
+#error Unknown endianness
+#endif
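
lwr and lwl each cover one end of an unaligned word, and these constants pick the byte address each instruction starts from for a given endianness. A portable sketch of the combined effect (little-endian byte order assumed in the assert):

// unaligned_word_sketch.cc -- illustrative only.
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t buf[8] = {0, 0x11, 0x22, 0x33, 0x44, 0, 0, 0};
  // Assemble buf[1..4] the way lwr(+kMipsLwrOffset)/lwl(+kMipsLwlOffset)
  // combine on a little-endian core: byte 1 is least significant.
  uint32_t v = (uint32_t)buf[1] | ((uint32_t)buf[2] << 8) |
               ((uint32_t)buf[3] << 16) | ((uint32_t)buf[4] << 24);
  assert(v == 0x44332211u);
  return 0;
}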
 
 #define __STDC_FORMAT_MACROS
 #include <inttypes.h>
@@ -409,6 +422,7 @@
   MOVZ = ((1U << 3) + 2),
   MOVN = ((1U << 3) + 3),
   BREAK = ((1U << 3) + 5),
+  SYNC = ((1U << 3) + 7),
 
   MFHI = ((2U << 3) + 0),
   CLZ_R6 = ((2U << 3) + 0),
@@ -620,7 +634,6 @@
   NULLSF = 0U
 };
 
-
 // ----- Emulated conditions.
 // On MIPS we use this enum to abstract from conditional branch instructions.
 // The 'U' prefix is used to specify unsigned comparisons.
@@ -928,8 +941,7 @@
       FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) |
       FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) |
       FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
-      FunctionFieldToBitNumber(SELNEZ_S);
-
+      FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
 
   // Get the encoding type of the instruction.
   inline Type InstructionType(TypeChecks checks = NORMAL) const;
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index 7e0a480..e1890ee 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1191,6 +1191,9 @@
     case TNE:
       Format(instr, "tne     'rs, 'rt, code: 'code");
       break;
+    case SYNC:
+      Format(instr, "sync");
+      break;
     case MOVZ:
       Format(instr, "movz    'rd, 'rs, 'rt");
       break;
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
index 06e3b77..30a7a74 100644
--- a/src/mips/interface-descriptors-mips.cc
+++ b/src/mips/interface-descriptors-mips.cc
@@ -46,16 +46,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return a1; }
-const Register InstanceOfDescriptor::RightRegister() { return a0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return a1; }
 const Register StringCompareDescriptor::RightRegister() { return a0; }
 
-
-const Register ApiGetterDescriptor::function_address() { return a2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return a0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
 
 const Register MathPowTaggedDescriptor::exponent() { return a2; }
 
@@ -68,6 +63,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return a0; }
+const Register HasPropertyDescriptor::KeyRegister() { return a3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -247,13 +244,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // register state
+  // a0 -- number of arguments
+  // a1 -- function
+  // a2 -- allocation site with elements kind
+  Register registers[] = {a1, a2, a0};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -317,6 +317,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -377,9 +382,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
-      kInterpreterDispatchTableRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -414,6 +418,16 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      v0,  // the value to pass to the generator
+      a1,  // the JSGeneratorObject to resume
+      a2   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 7cbbd3a..3dbfd6b 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1192,14 +1192,199 @@
 // ------------Pseudo-instructions-------------
 
 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
-  lwr(rd, rs);
-  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lw(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset() + kMipsLwrOffset) &&
+        is_int16(rs.offset() + kMipsLwlOffset)) {
+      if (!rd.is(rs.rm())) {
+        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+      } else {
+        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      lwr(rd, MemOperand(at, kMipsLwrOffset));
+      lwl(rd, MemOperand(at, kMipsLwlOffset));
+    }
+  }
 }
 
 
 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
-  swr(rd, rs);
-  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sw(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset() + kMipsSwrOffset) &&
+        is_int16(rs.offset() + kMipsSwlOffset)) {
+      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      swr(rd, MemOperand(at, kMipsSwrOffset));
+      swl(rd, MemOperand(at, kMipsSwlOffset));
+    }
+  }
+}
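
The short lwr/lwl form is only legal while both displaced offsets still fit the signed 16-bit immediate field; otherwise LoadRegPlusOffsetToAt materializes the address in at first. A sketch of that range check, reusing the little-endian offset constants from above:

// offset_range_sketch.cc -- illustrative only.
#include <cstdint>

static bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }

int main() {
  const int kMipsLwrOffset = 0, kMipsLwlOffset = 3;  // little-endian values
  int32_t offset = 0x7ffe;  // near the top of the signed 16-bit range
  bool short_form = is_int16(offset + kMipsLwrOffset) &&
                    is_int16(offset + kMipsLwlOffset);
  // 0x7ffe + 3 == 0x8001 no longer fits, so the fallback path is taken.
  return short_form ? 1 : 0;
}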
+
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lh(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lb(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lb(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lb(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    sll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lhu(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lbu(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    sll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
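
Ulh and Ulhu assemble the halfword from two byte loads: the high byte (sign- or zero-extended) is shifted left by eight and OR-ed with the low byte parked in at. A little-endian sketch of the signed case:

// unaligned_halfword_sketch.cc -- illustrative only.
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t mem[2] = {0xCD, 0xAB};  // unaligned halfword 0xABCD
  uint32_t at = mem[0];                 // lbu(at, rs): zero-extend low byte
  int32_t rd = (int8_t)mem[1];          // lb(rd, rs + 1): sign-extend high byte
  rd = (int32_t)(((uint32_t)rd << 8) | at);  // sll(rd, rd, 8); or_(rd, rd, at)
  assert((int16_t)rd == -21555);        // 0xABCD as a signed halfword
  return 0;
}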
+
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  DCHECK(!rs.rm().is(scratch));
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sh(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    MemOperand source = rs;
+    // If offset > 16 bits, load address to at with offset 0.
+    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+      LoadRegPlusOffsetToAt(rs);
+      source = MemOperand(at, 0);
+    }
+
+    if (!scratch.is(rd)) {
+      mov(scratch, rd);
+    }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+    sb(scratch, source);
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, source);
+#endif
+  }
+}
+
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    lwc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    Ulw(scratch, rs);
+    mtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    swc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    mfc1(scratch, fd);
+    Usw(scratch, rs);
+  }
+}
+
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    ldc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+    mtc1(scratch, fd);
+    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+    Mthc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sdc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    mfc1(scratch, fd);
+    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+    Mfhc1(scratch, fd);
+    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+  }
 }
 
 
@@ -1354,6 +1539,252 @@
   addiu(sp, sp, stack_offset);
 }
 
+void MacroAssembler::AddPair(Register dst_low, Register dst_high,
+                             Register left_low, Register left_high,
+                             Register right_low, Register right_high) {
+  Label no_overflow;
+  Register kScratchReg = s3;
+  Register kScratchReg2 = s4;
+  // Add lower word
+  Addu(dst_low, left_low, right_low);
+  Addu(dst_high, left_high, right_high);
+  // Check for lower word unsigned overflow
+  Sltu(kScratchReg, dst_low, left_low);
+  Sltu(kScratchReg2, dst_low, right_low);
+  Or(kScratchReg, kScratchReg2, kScratchReg);
+  Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
+  // Increment higher word if there was overflow
+  Addu(dst_high, dst_high, 0x1);
+  bind(&no_overflow);
+}
+
+void MacroAssembler::SubPair(Register dst_low, Register dst_high,
+                             Register left_low, Register left_high,
+                             Register right_low, Register right_high) {
+  Label no_overflow;
+  Register kScratchReg = s3;
+  // Subtract lower word
+  Subu(dst_low, left_low, right_low);
+  Subu(dst_high, left_high, right_high);
+  // Check for lower word unsigned underflow
+  Sltu(kScratchReg, left_low, right_low);
+  Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
+  // Decrement higher word if there was underflow
+  Subu(dst_high, dst_high, 0x1);
+  bind(&no_overflow);
+}
+
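In both pair helpers the carry or borrow out of the low word is recovered with unsigned compares. AddPair tests the sum against both inputs and Or-s the results, which remains correct even if dst_low aliases one of the operands. A C++ model (not V8 code):

#include <cstdint>
void AddPairModel(uint32_t& dlo, uint32_t& dhi, uint32_t llo, uint32_t lhi,
                  uint32_t rlo, uint32_t rhi) {
  dlo = llo + rlo;
  dhi = lhi + rhi;
  if (dlo < llo || dlo < rlo) dhi += 1;  // the two Sltu + Or above
}
void SubPairModel(uint32_t& dlo, uint32_t& dhi, uint32_t llo, uint32_t lhi,
                  uint32_t rlo, uint32_t rhi) {
  dlo = llo - rlo;
  dhi = lhi - rhi;
  if (llo < rlo) dhi -= 1;  // borrow detected on the original operands
}
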
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register shift) {
+  Label less_than_32;
+  Label zero_shift;
+  Label word_shift;
+  Label done;
+  Register kScratchReg = s3;
+  And(shift, shift, 0x3F);
+  li(kScratchReg, 0x20);
+  Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+  Branch(&word_shift, eq, shift, Operand(kScratchReg));
+  // Shift more than 32
+  Subu(kScratchReg, shift, kScratchReg);
+  mov(dst_low, zero_reg);
+  sllv(dst_high, src_low, kScratchReg);
+  Branch(&done);
+  // Word shift
+  bind(&word_shift);
+  mov(dst_low, zero_reg);
+  mov(dst_high, src_low);
+  Branch(&done);
+
+  bind(&less_than_32);
+  // Check if zero shift
+  Branch(&zero_shift, eq, shift, Operand(zero_reg));
+  // Shift less than 32
+  Subu(kScratchReg, kScratchReg, shift);
+  sllv(dst_high, src_high, shift);
+  sllv(dst_low, src_low, shift);
+  srlv(kScratchReg, src_low, kScratchReg);
+  Or(dst_high, dst_high, kScratchReg);
+  Branch(&done);
+  // Zero shift
+  bind(&zero_shift);
+  mov(dst_low, src_low);
+  mov(dst_high, src_high);
+  bind(&done);
+}
+
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
+  Register kScratchReg = s3;
+  shift = shift & 0x3F;
+  if (shift < 32) {
+    if (shift == 0) {
+      mov(dst_low, src_low);
+      mov(dst_high, src_high);
+    } else {
+      sll(dst_high, src_high, shift);
+      sll(dst_low, src_low, shift);
+      shift = 32 - shift;
+      srl(kScratchReg, src_low, shift);
+      Or(dst_high, dst_high, kScratchReg);
+    }
+  } else {
+    if (shift == 32) {
+      mov(dst_low, zero_reg);
+      mov(dst_high, src_low);
+    } else {
+      shift = shift - 32;
+      mov(dst_low, zero_reg);
+      sll(dst_high, src_low, shift);
+    }
+  }
+}
+
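The pair shifts special-case shift amounts of 0 and 32 because MIPS variable shifts use only the low five bits of the amount, so an expression like src_low >> (32 - shift) would misbehave at exactly those values. A C++ model of the immediate left-shift variant, under that assumption:

#include <cstdint>
void ShlPairModel(uint32_t& dlo, uint32_t& dhi, uint32_t slo, uint32_t shi,
                  uint32_t shift) {
  shift &= 0x3F;
  if (shift == 0) {
    dlo = slo; dhi = shi;
  } else if (shift < 32) {
    dhi = (shi << shift) | (slo >> (32 - shift));  // sll/sll/srl/Or above
    dlo = slo << shift;
  } else if (shift == 32) {
    dhi = slo; dlo = 0;
  } else {
    dhi = slo << (shift - 32); dlo = 0;
  }
}
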
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register shift) {
+  Label less_than_32;
+  Label zero_shift;
+  Label word_shift;
+  Label done;
+  Register kScratchReg = s3;
+  And(shift, shift, 0x3F);
+  li(kScratchReg, 0x20);
+  Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+  Branch(&word_shift, eq, shift, Operand(kScratchReg));
+  // Shift more than 32
+  Subu(kScratchReg, shift, kScratchReg);
+  mov(dst_high, zero_reg);
+  srlv(dst_low, src_high, kScratchReg);
+  Branch(&done);
+  // Word shift
+  bind(&word_shift);
+  mov(dst_high, zero_reg);
+  mov(dst_low, src_high);
+  Branch(&done);
+
+  bind(&less_than_32);
+  // Check if zero shift
+  Branch(&zero_shift, eq, shift, Operand(zero_reg));
+  // Shift less than 32
+  Subu(kScratchReg, kScratchReg, shift);
+  srlv(dst_high, src_high, shift);
+  srlv(dst_low, src_low, shift);
+  sllv(kScratchReg, src_high, kScratchReg);
+  Or(dst_low, dst_low, kScratchReg);
+  Branch(&done);
+  // Zero shift
+  bind(&zero_shift);
+  mov(dst_low, src_low);
+  mov(dst_high, src_high);
+  bind(&done);
+}
+
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
+  Register kScratchReg = s3;
+  shift = shift & 0x3F;
+  if (shift < 32) {
+    if (shift == 0) {
+      mov(dst_low, src_low);
+      mov(dst_high, src_high);
+    } else {
+      srl(dst_high, src_high, shift);
+      srl(dst_low, src_low, shift);
+      shift = 32 - shift;
+      sll(kScratchReg, src_high, shift);
+      Or(dst_low, dst_low, kScratchReg);
+    }
+  } else {
+    if (shift == 32) {
+      mov(dst_high, zero_reg);
+      mov(dst_low, src_high);
+    } else {
+      shift = shift - 32;
+      mov(dst_high, zero_reg);
+      srl(dst_low, src_high, shift);
+    }
+  }
+}
+
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register shift) {
+  Label less_than_32;
+  Label zero_shift;
+  Label word_shift;
+  Label done;
+  Register kScratchReg = s3;
+  Register kScratchReg2 = s4;
+  And(shift, shift, 0x3F);
+  li(kScratchReg, 0x20);
+  Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+  Branch(&word_shift, eq, shift, Operand(kScratchReg));
+
+  // Shift more than 32
+  li(kScratchReg2, 0x1F);
+  Subu(kScratchReg, shift, kScratchReg);
+  srav(dst_high, src_high, kScratchReg2);
+  srav(dst_low, src_high, kScratchReg);
+  Branch(&done);
+  // Word shift
+  bind(&word_shift);
+  li(kScratchReg2, 0x1F);
+  srav(dst_high, src_high, kScratchReg2);
+  mov(dst_low, src_high);
+  Branch(&done);
+
+  bind(&less_than_32);
+  // Check if zero shift
+  Branch(&zero_shift, eq, shift, Operand(zero_reg));
+
+  // Shift less than 32
+  Subu(kScratchReg, kScratchReg, shift);
+  srav(dst_high, src_high, shift);
+  srlv(dst_low, src_low, shift);
+  sllv(kScratchReg, src_high, kScratchReg);
+  Or(dst_low, dst_low, kScratchReg);
+  Branch(&done);
+  // Zero shift
+  bind(&zero_shift);
+  mov(dst_low, src_low);
+  mov(dst_high, src_high);
+  bind(&done);
+}
+
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
+  Register kScratchReg = s3;
+  shift = shift & 0x3F;
+  if (shift < 32) {
+    if (shift == 0) {
+      mov(dst_low, src_low);
+      mov(dst_high, src_high);
+    } else {
+      sra(dst_high, src_high, shift);
+      srl(dst_low, src_low, shift);
+      shift = 32 - shift;
+      sll(kScratchReg, src_high, shift);
+      Or(dst_low, dst_low, kScratchReg);
+    }
+  } else {
+    if (shift == 32) {
+      sra(dst_high, src_high, 31);
+      mov(dst_low, src_high);
+    } else {
+      shift = shift - 32;
+      sra(dst_high, src_high, 31);
+      sra(dst_low, src_high, shift);
+    }
+  }
+}
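The arithmetic variant differs only in how the vacated bits are filled: the high word always shifts with sra, and once the shift reaches 32 the high result becomes the sign word (the sra ..., 31 above). A sketch, assuming >> on int32_t is arithmetic, as it is on V8's supported targets:

#include <cstdint>
void SarPairModel(uint32_t& dlo, uint32_t& dhi, uint32_t slo, uint32_t shi,
                  uint32_t shift) {
  int32_t hi = static_cast<int32_t>(shi);
  shift &= 0x3F;
  if (shift == 0) {
    dlo = slo; dhi = shi;
  } else if (shift < 32) {
    dlo = (slo >> shift) | (shi << (32 - shift));
    dhi = static_cast<uint32_t>(hi >> shift);
  } else if (shift == 32) {
    dlo = shi; dhi = static_cast<uint32_t>(hi >> 31);
  } else {
    dlo = static_cast<uint32_t>(hi >> (shift - 32));
    dhi = static_cast<uint32_t>(hi >> 31);  // sign word fills the high half
  }
}
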
 
 void MacroAssembler::Ext(Register rt,
                          Register rs,
@@ -3654,6 +4085,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3726,18 +4158,21 @@
   // to calculate the new top.
   Addu(result_end, result, Operand(object_size));
   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
-  sw(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    Addu(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    sw(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  Addu(result, result, Operand(kHeapObjectTag));
 }
 
 
 void MacroAssembler::Allocate(Register object_size, Register result,
                               Register result_end, Register scratch,
                               Label* gc_required, AllocationFlags flags) {
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3810,6 +4245,7 @@
   } else {
     Addu(result_end, result, Operand(object_size));
   }
+
   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
 
   // Update allocation top. result temporarily holds the new top.
@@ -3817,14 +4253,104 @@
     And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
     Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
   }
-  sw(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    Addu(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    sw(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  Addu(result, result, Operand(kHeapObjectTag));
 }
 
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register scratch1, Register scratch2,
+                                  AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address and allocation limit registers.
+  Register top_address = scratch1;
+  // This code stores a temporary value in t9.
+  Register result_end = scratch2;
+  li(top_address, Operand(allocation_top));
+  lw(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    And(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    Branch(&aligned, eq, result_end, Operand(zero_reg));
+    li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    sw(result_end, MemOperand(result));
+    Addu(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
+  Addu(result_end, result, Operand(object_size));
+
+  // The top pointer is not updated for allocation folding dominators.
+  sw(result_end, MemOperand(top_address));
+
+  Addu(result, result, Operand(kHeapObjectTag));
+}
+
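Stripped of the alignment and size bookkeeping, FastAllocate is a plain bump of the top pointer followed by tagging, with deliberately no limit branch. A minimal model, assuming kHeapObjectTag == 1 and that a dominating Allocate already reserved the space:

#include <cstdint>
uintptr_t FastAllocateModel(uintptr_t* top, uintptr_t size_in_bytes) {
  uintptr_t result = *top;
  *top = result + size_in_bytes;  // bump only; the dominator checked the limit
  return result + 1;              // tag the result as a heap object
}
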
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, Register scratch,
+                                  AllocationFlags flags) {
+  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+  // is not specified. Other registers must not overlap.
+  DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+  DCHECK(!AreAliased(result_end, result, scratch, t9, at));
+  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address and allocation limit registers.
+  Register top_address = scratch;
+  // This code stores a temporary value in t9.
+  li(top_address, Operand(allocation_top));
+  lw(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    And(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    Branch(&aligned, eq, result_end, Operand(zero_reg));
+    li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    sw(result_end, MemOperand(result));
+    Addu(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
+  // Calculate the new top using result. Object size may be in words, so a
+  // shift is required to get the number of bytes. Unlike Allocate above,
+  // there is no limit check here: FastAllocate may only run when a
+  // dominating Allocate has already proved the space is available.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    Lsa(result_end, result, object_size, kPointerSizeLog2);
+  } else {
+    Addu(result_end, result, Operand(object_size));
+  }
+
+  // The top pointer is not updated for allocation folding dominators.
+  sw(result_end, MemOperand(top_address));
+
+  Addu(result, result, Operand(kHeapObjectTag));
+}
 
 void MacroAssembler::AllocateTwoByteString(Register result,
                                            Register length,
@@ -3841,12 +4367,8 @@
   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate two-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result,
@@ -3869,12 +4391,8 @@
   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate one-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -3888,7 +4406,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
   InitializeNewString(result,
                       length,
                       Heap::kConsStringMapRootIndex,
@@ -3901,12 +4419,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -3919,7 +4433,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result,
                       length,
@@ -3935,7 +4449,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -3961,12 +4475,11 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* need_gc,
-                                        TaggingMode tagging_mode,
                                         MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
-           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+           NO_ALLOCATION_FLAGS);
 
   Heap::RootListIndex map_index = mode == MUTABLE
       ? Heap::kMutableHeapNumberMapRootIndex
@@ -3974,11 +4487,7 @@
   AssertIsRoot(heap_number_map, map_index);
 
   // Store heap number map in the allocated object.
-  if (tagging_mode == TAG_RESULT) {
-    sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-  } else {
-    sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
-  }
+  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
 }
 
 
@@ -4002,7 +4511,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -5615,6 +6125,16 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    SmiTst(object, t8);
+    Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+    GetObjectType(object, t8, t8);
+    Check(eq, kOperandIsNotAGeneratorObject, t8,
+          Operand(JS_GENERATOR_OBJECT_TYPE));
+  }
+}
 
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
@@ -6089,7 +6609,7 @@
                                                      Label* no_memento_found) {
   Label map_check;
   Label top_check;
-  ExternalReference new_space_allocation_top =
+  ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -6099,7 +6619,9 @@
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
   Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+  li(at, Operand(new_space_allocation_top_adr));
+  lw(at, MemOperand(at));
+  Xor(scratch_reg, scratch_reg, Operand(at));
   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
   Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
   // The object is on a different page than allocation top. Bail out if the
@@ -6115,7 +6637,7 @@
   // we are below top.
   bind(&top_check);
   Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  li(at, Operand(new_space_allocation_top));
+  li(at, Operand(new_space_allocation_top_adr));
   lw(at, MemOperand(at));
   Branch(no_memento_found, gt, scratch_reg, Operand(at));
   // Memento map check.
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 2f02865..2417025 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -18,8 +18,8 @@
 const Register kReturnRegister2 = {Register::kCode_a0};
 const Register kJSFunctionRegister = {Register::kCode_a1};
 const Register kContextRegister = {Register::kCpRegister};
+const Register kAllocateSizeRegister = {Register::kCode_a0};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_t3};
 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
 const Register kInterpreterDispatchTableRegister = {Register::kCode_t6};
@@ -555,6 +555,15 @@
   void Allocate(Register object_size, Register result, Register result_new,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
+  // FastAllocate is currently only used for folded allocations. It just
+  // increments the top pointer without checking against the limit. This can
+  // only be done if it was proved earlier that the allocation will succeed.

+  void FastAllocate(int object_size, Register result, Register scratch1,
+                    Register scratch2, AllocationFlags flags);
+
+  void FastAllocate(Register object_size, Register result, Register result_new,
+                    Register scratch, AllocationFlags flags);
+
   void AllocateTwoByteString(Register result,
                              Register length,
                              Register scratch1,
@@ -589,7 +598,6 @@
                           Register scratch2,
                           Register heap_number_map,
                           Label* gc_required,
-                          TaggingMode tagging_mode = TAG_RESULT,
                           MutableMode mode = IMMUTABLE);
   void AllocateHeapNumberWithValue(Register result,
                                    FPURegister value,
@@ -681,9 +689,19 @@
 
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
+  void Ulh(Register rd, const MemOperand& rs);
+  void Ulhu(Register rd, const MemOperand& rs);
+  void Ush(Register rd, const MemOperand& rs, Register scratch);
+
   void Ulw(Register rd, const MemOperand& rs);
   void Usw(Register rd, const MemOperand& rs);
 
+  void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
+  void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+  void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
+  void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
   // Load int32 in the rd register.
   void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
   inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
@@ -809,6 +827,31 @@
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
+  // Int64Lowering instructions
+  void AddPair(Register dst_low, Register dst_high, Register left_low,
+               Register left_high, Register right_low, Register right_high);
+
+  void SubPair(Register dst_low, Register dst_high, Register left_low,
+               Register left_high, Register right_low, Register right_high);
+
+  void ShlPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, Register shift);
+
+  void ShlPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, uint32_t shift);
+
+  void ShrPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, Register shift);
+
+  void ShrPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, uint32_t shift);
+
+  void SarPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, Register shift);
+
+  void SarPair(Register dst_low, Register dst_high, Register src_low,
+               Register src_high, uint32_t shift);
+
   // ---------------------------------------------------------------------------
   // FPU macros. These do not handle special cases like NaN or +- inf.
 
@@ -1546,6 +1589,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index e37b6e1..f8dc515 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -586,7 +586,7 @@
         }
 
         while (cur < end) {
-          PrintF("  0x%08x:  0x%08x %10d",
+          PrintF("  0x%08" PRIxPTR ":  0x%08x %10d",
                  reinterpret_cast<intptr_t>(cur), *cur, *cur);
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int value = *cur;
@@ -649,8 +649,8 @@
 
         while (cur < end) {
           dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08x  %s\n",
-              reinterpret_cast<intptr_t>(cur), buffer.start());
+          PrintF("  0x%08" PRIxPTR "  %s\n", reinterpret_cast<intptr_t>(cur),
+                 buffer.start());
           cur += Instruction::kInstrSize;
         }
       } else if (strcmp(cmd, "gdb") == 0) {
@@ -771,8 +771,8 @@
 
         while (cur < end) {
           dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08x  %s\n",
-                 reinterpret_cast<intptr_t>(cur), buffer.start());
+          PrintF("  0x%08" PRIxPTR "  %s\n", reinterpret_cast<intptr_t>(cur),
+                 buffer.start());
           cur += Instruction::kInstrSize;
         }
       } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
@@ -1786,12 +1786,12 @@
 int Simulator::ReadW(int32_t addr, Instruction* instr) {
   if (addr >=0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
-    PrintF("Memory read from bad address: 0x%08x, pc=0x%08x\n",
-           addr, reinterpret_cast<intptr_t>(instr));
+    PrintF("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+           reinterpret_cast<intptr_t>(instr));
     MipsDebugger dbg(this);
     dbg.Debug();
   }
-  if ((addr & kPointerAlignmentMask) == 0) {
+  if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     TraceMemRd(addr, static_cast<int32_t>(*ptr));
     return *ptr;
@@ -1808,12 +1808,12 @@
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
   if (addr >= 0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
-    PrintF("Memory write to bad address: 0x%08x, pc=0x%08x\n",
-           addr, reinterpret_cast<intptr_t>(instr));
+    PrintF("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+           reinterpret_cast<intptr_t>(instr));
     MipsDebugger dbg(this);
     dbg.Debug();
   }
-  if ((addr & kPointerAlignmentMask) == 0) {
+  if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     TraceMemWr(addr, value, WORD);
     *ptr = value;
@@ -1828,7 +1828,7 @@
 
 
 double Simulator::ReadD(int32_t addr, Instruction* instr) {
-  if ((addr & kDoubleAlignmentMask) == 0) {
+  if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
     double* ptr = reinterpret_cast<double*>(addr);
     return *ptr;
   }
@@ -1841,7 +1841,7 @@
 
 
 void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
-  if ((addr & kDoubleAlignmentMask) == 0) {
+  if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
     double* ptr = reinterpret_cast<double*>(addr);
     *ptr = value;
     return;
@@ -1854,7 +1854,7 @@
 
 
 uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     TraceMemRd(addr, static_cast<int32_t>(*ptr));
     return *ptr;
@@ -1868,7 +1868,7 @@
 
 
 int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     TraceMemRd(addr, static_cast<int32_t>(*ptr));
     return *ptr;
@@ -1882,7 +1882,7 @@
 
 
 void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     TraceMemWr(addr, value, HALF);
     *ptr = value;
@@ -1896,7 +1896,7 @@
 
 
 void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     TraceMemWr(addr, value, HALF);
     *ptr = value;
@@ -1953,7 +1953,7 @@
 
 // Unsupported instructions use Format to print an error and stop execution.
 void Simulator::Format(Instruction* instr, const char* format) {
-  PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+  PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
          reinterpret_cast<intptr_t>(instr), format);
   UNIMPLEMENTED_MIPS();
 }
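The PRIxPTR changes throughout the simulator replace a fixed %x conversion with the <cinttypes> macro so the format matches intptr_t on both 32- and 64-bit hosts. A standalone illustration:

#include <cinttypes>
#include <cstdio>
// Portable: the macro expands to the right length modifier per platform.
void PrintAddress(const void* p) {
  std::printf("0x%08" PRIxPTR "\n", reinterpret_cast<intptr_t>(p));
}
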
@@ -3731,6 +3731,9 @@
     case TNE:
       do_interrupt = rs() != rt();
       break;
+    case SYNC:
+      // TODO(palfia): Ignore sync instruction for now.
+      break;
     // Conditional moves.
     case MOVN:
       if (rt()) {
@@ -4413,8 +4416,9 @@
       UNSUPPORTED();
   }
   if (::v8::internal::FLAG_trace_sim) {
-    PrintF("  0x%08x  %-44s   %s\n", reinterpret_cast<intptr_t>(instr),
-           buffer.start(), trace_buf_.start());
+    PrintF("  0x%08" PRIxPTR "  %-44s   %s\n",
+           reinterpret_cast<intptr_t>(instr), buffer.start(),
+           trace_buf_.start());
   }
   if (!pc_modified_) {
     set_register(pc, reinterpret_cast<int32_t>(instr) +
diff --git a/src/mips64/assembler-mips64-inl.h b/src/mips64/assembler-mips64-inl.h
index dec58e8..7903094 100644
--- a/src/mips64/assembler-mips64-inl.h
+++ b/src/mips64/assembler-mips64-inl.h
@@ -102,11 +102,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Assembler::target_address_at(pc_, host_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
@@ -158,19 +153,6 @@
   }
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
-                                   icache_flush_mode);
-}
-
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
 }
@@ -362,7 +344,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
index 5a8dd2c..2b8bc72 100644
--- a/src/mips64/assembler-mips64.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -167,6 +167,43 @@
   return false;
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return static_cast<uint32_t>(
+      reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_memory_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_memory_reference &&
+           updated_memory_reference < new_base + new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
+        icache_flush_mode);
+  } else {
+    UNREACHABLE();
+  }
+}
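Both branches above preserve an offset across the update: a memory reference keeps its distance from the base, and a size reference keeps its distance below the size. Stated with plain integers (illustrative only; the real code routes the result through Assembler::set_target_address_at):

#include <cstdint>
uintptr_t RebaseMemoryReference(uintptr_t ref, uintptr_t old_base,
                                uintptr_t new_base) {
  return new_base + (ref - old_base);  // offset from the base is preserved
}
uint32_t RebaseSizeReference(uint32_t ref, uint32_t old_size,
                             uint32_t new_size) {
  // Distance below the size is preserved; the unsigned wrap-around in
  // (ref - old_size) is intentional and matches the DCHECKed invariant
  // ref <= old_size.
  return new_size + (ref - old_size);
}
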
 
 // -----------------------------------------------------------------------------
 // Implementation of Operand and MemOperand.
@@ -2001,11 +2038,15 @@
 
 
 void Assembler::lwl(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::lwr(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
 }
 
@@ -2041,11 +2082,15 @@
 
 
 void Assembler::swl(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::swr(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
 }
 
@@ -2084,21 +2129,29 @@
 
 
 void Assembler::ldl(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::ldr(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::sdl(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
 }
 
 
 void Assembler::sdr(Register rd, const MemOperand& rs) {
+  DCHECK(is_int16(rs.offset_));
+  DCHECK(kArchVariant == kMips64r2);
   GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
 }
 
@@ -2255,6 +2308,10 @@
   emit(instr);
 }
 
+void Assembler::sync() {
+  Instr sync_instr = SPECIAL | SYNC;
+  emit(sync_instr);
+}
 
 // Move from HI/LO register.
 
@@ -3208,6 +3265,7 @@
     data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
   } else {
     data = jump_address(label);
+    unbound_labels_count_++;
     internal_reference_positions_.insert(label->pos());
   }
   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index de09366..f93bc48 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -63,6 +63,8 @@
   V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
   V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
 
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
 #define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
   V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
   V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
@@ -154,7 +156,7 @@
 Register ToRegister(int num);
 
 // Coprocessor register.
-struct DoubleRegister {
+struct FPURegister {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
     DOUBLE_REGISTERS(REGISTER_CODE)
@@ -174,21 +176,21 @@
   const char* ToString();
   bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
-  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
-  DoubleRegister low() const {
+  bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
+  FPURegister low() const {
     // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
     // Find low reg of a Double-reg pair, which is the reg itself.
     DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
-    DoubleRegister reg;
+    FPURegister reg;
     reg.reg_code = reg_code;
     DCHECK(reg.is_valid());
     return reg;
   }
-  DoubleRegister high() const {
+  FPURegister high() const {
     // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
    // Find high reg of a Double-reg pair, which is reg + 1.
     DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
-    DoubleRegister reg;
+    FPURegister reg;
     reg.reg_code = reg_code + 1;
     DCHECK(reg.is_valid());
     return reg;
@@ -203,8 +205,8 @@
     return 1 << reg_code;
   }
 
-  static DoubleRegister from_code(int code) {
-    DoubleRegister r = {code};
+  static FPURegister from_code(int code) {
+    FPURegister r = {code};
     return r;
   }
   void setcode(int f) {
@@ -229,8 +231,12 @@
 // but it is not in common use. Someday we will want to support this in v8.)
 
 // For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef DoubleRegister FPURegister;
-typedef DoubleRegister FloatRegister;
+typedef FPURegister FloatRegister;
+
+typedef FPURegister DoubleRegister;
+
+// TODO(mips64) Define SIMD registers.
+typedef FPURegister Simd128Register;
 
 const DoubleRegister no_freg = {-1};
 
@@ -306,9 +312,6 @@
 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
 const FPUControlRegister FCSR = { kFCSRRegister };
 
-// TODO(mips64) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
 // -----------------------------------------------------------------------------
 // Machine instruction Operands.
 const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -849,6 +852,9 @@
   void teq(Register rs, Register rt, uint16_t code);
   void tne(Register rs, Register rt, uint16_t code);
 
+  // Memory barrier instruction.
+  void sync();
+
   // Move from HI/LO register.
   void mfhi(Register rd);
   void mflo(Register rd);
@@ -1099,7 +1105,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                        intptr_t pc_delta);
@@ -1212,11 +1218,16 @@
 
   bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
 
+  inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
  protected:
   // Load Scaled Address instructions.
   void lsa(Register rd, Register rt, Register rs, uint8_t sa);
   void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
 
+  // Helpers.
+  void LoadRegPlusOffsetToAt(const MemOperand& src);
+
   // Relocation for a type-recording IC has the AST id added to it.  This
   // member variable is a way to pass the information from the call site to
   // the relocation info.
@@ -1413,9 +1424,6 @@
   void GenInstrJump(Opcode opcode,
                      uint32_t address);
 
-  // Helpers.
-  void LoadRegPlusOffsetToAt(const MemOperand& src);
-
   // Labels.
   void print(Label* L);
   void bind_to(Label* L, int pos);
diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc
index b55b77c..7a0d81a 100644
--- a/src/mips64/builtins-mips64.cc
+++ b/src/mips64/builtins-mips64.cc
@@ -592,16 +592,9 @@
     // a0: number of arguments
     // a1: constructor function
     // a3: new target
-    if (is_api_function) {
-      __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(a0);
-      __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -690,6 +683,140 @@
   Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : the value to pass to the generator
+  //  -- a1 : the JSGeneratorObject to resume
+  //  -- a2 : the resume mode (tagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(a1);
+
+  // Store input value into generator object.
+  __ sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, v0, a3,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ li(t1, Operand(step_in_enabled));
+  __ lb(t1, MemOperand(t1));
+  __ Branch(&skip_flooding, eq, t1, Operand(zero_reg));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a2, a4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(a1, a2);
+    __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Push receiver.
+  __ ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+  __ Push(a5);
+
+  // ----------- S t a t e -------------
+  //  -- a1    : the JSGeneratorObject to resume
+  //  -- a2    : the resume mode (tagged)
+  //  -- a4    : generator function
+  //  -- cp    : generator context
+  //  -- ra    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a3,
+        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ Dsubu(a3, a3, Operand(1));
+    __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+  __ GetObjectType(a3, a3, a3);
+  __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+
+  // New-style (ignition/turbofan) generator object.
+  {
+    __ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+         FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ SmiUntag(a0);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ Move(a3, a1);
+    __ Move(a1, a4);
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ Jump(a2);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ Push(ra, fp);
+    __ Move(fp, sp);
+    __ Push(cp, a4);
+
+    // Restore the operand stack.
+    __ ld(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+    __ ld(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
+    __ SmiUntag(a3);
+    __ Daddu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ Dlsa(a3, a0, a3, kPointerSizeLog2);
+    {
+      Label done_loop, loop;
+      __ bind(&loop);
+      __ Branch(&done_loop, eq, a0, Operand(a3));
+      __ ld(a5, MemOperand(a0));
+      __ Push(a5);
+      __ Branch(USE_DELAY_SLOT, &loop);
+      __ daddiu(a0, a0, kPointerSize);  // In delay slot.
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
+    __ sd(a5, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+
+    // Resume the generator function at the continuation.
+    __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+    __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+    __ Daddu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(a2);
+    __ Daddu(a3, a3, Operand(a2));
+    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ Move(v0, a1);  // Continuation expects generator object in v0.
+    __ Jump(a3);
+  }
+}
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::INTERNAL);
@@ -813,7 +940,6 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
-
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -830,14 +956,16 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(a1);
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into kInterpreterBytecodeRegister.
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   Label load_debug_bytecode_array, bytecode_array_loaded;
   Register debug_info = kInterpreterBytecodeArrayRegister;
@@ -849,8 +977,11 @@
         FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ JumpIfRoot(kInterpreterBytecodeArrayRegister,
+                Heap::kUndefinedValueRootIndex, &bytecode_array_not_present);
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ SmiTst(kInterpreterBytecodeArrayRegister, a4);
     __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
               Operand(zero_reg));
@@ -859,8 +990,13 @@
               Operand(BYTECODE_ARRAY_TYPE));
   }
 
-  // Push new.target, bytecode array and zero for bytecode array offset.
-  __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+  // Load initial bytecode offset.
+  __ li(kInterpreterBytecodeOffsetRegister,
+        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+  __ Push(a3, kInterpreterBytecodeArrayRegister, a4);
 
   // Allocate the local and temporary register file on the stack.
   {
@@ -890,17 +1026,8 @@
     __ Branch(&loop_header, ge, a4, Operand(zero_reg));
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load bytecode offset and dispatch table into registers.
+  // Load accumulator and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ Daddu(kInterpreterRegisterFileRegister, fp,
-           Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ li(kInterpreterBytecodeOffsetRegister,
-        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ li(kInterpreterDispatchTableRegister,
         Operand(ExternalReference::interpreter_dispatch_table_address(
             masm->isolate())));
@@ -911,42 +1038,41 @@
   __ lbu(a0, MemOperand(a0));
   __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
   __ ld(at, MemOperand(at));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(at);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+  // The return value is in v0.
+
+  // Get the arguments + receiver count.
+  __ ld(t0, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lw(t0, FieldMemOperand(t0, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments and return.
+  __ Daddu(sp, sp, t0);
+  __ Jump(ra);
 
   // Load debug copy of the bytecode array.
   __ bind(&load_debug_bytecode_array);
   __ ld(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ Branch(&bytecode_array_loaded);
-}
 
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in v0.
-
-  // Leave the frame (also dropping the register file).
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  // Drop receiver + arguments and return.
-  __ lw(at, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                            BytecodeArray::kParameterSizeOffset));
-  __ Daddu(sp, sp, at);
-  __ Jump(ra);
+  __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(a1, a4, a5);
+  __ Jump(a4);
 }
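With the removed Daddu (the Code::kHeaderSize adjustment), the dispatch table now holds raw code entry addresses, so a dispatch is just an indexed load plus an indirect call. A simplified model with assumed types:

#include <cstdint>
using BytecodeHandler = void (*)();
// Mirrors the lbu/Dlsa/ld/Call sequence: the byte at the current offset
// indexes the table of handler entry points directly.
void DispatchModel(const uint8_t* bytecode_array, intptr_t offset,
                   BytecodeHandler* dispatch_table) {
  dispatch_table[bytecode_array[offset]]();
}
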
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -979,7 +1105,6 @@
           RelocInfo::CODE_TARGET);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -1010,25 +1135,24 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+  __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
+                           Code::kHeaderSize - kHeapObjectTag));
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register and dispatch table register.
-  __ Daddu(kInterpreterRegisterFileRegister, fp,
-           Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Initialize the dispatch table register.
   __ li(kInterpreterDispatchTableRegister,
         Operand(ExternalReference::interpreter_dispatch_table_address(
             masm->isolate())));
 
-  // Get the context from the frame.
-  __ ld(kContextRegister,
-        MemOperand(kInterpreterRegisterFileRegister,
-                   InterpreterFrameConstants::kContextFromRegisterPointer));
-
   // Get the bytecode array pointer from the frame.
-  __ ld(
-      kInterpreterBytecodeArrayRegister,
-      MemOperand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+  __ ld(kInterpreterBytecodeArrayRegister,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1042,9 +1166,7 @@
 
   // Get the target bytecode offset from the frame.
   __ ld(kInterpreterBytecodeOffsetRegister,
-        MemOperand(
-            kInterpreterRegisterFileRegister,
-            InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
   // Dispatch to the target bytecode.
@@ -1053,70 +1175,170 @@
   __ lbu(a1, MemOperand(a1));
   __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
   __ ld(a1, MemOperand(a1));
-  __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(a1);
 }
 
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(a1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and and pop the
-  // accumulator value into the accumulator register.
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register argument_count = a0;
+  Register closure = a1;
+  Register new_target = a3;
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = a0;
+  Register index = a2;
+  __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+
+  // Find literals.
+  // a3  : native context
+  // a2  : length / index
+  // a0  : optimized code map
+  // stack[0]  : closure
+  // stack[8]  : new target
+  // stack[16] : argument count
+  Register native_context = a3;
+  __ ld(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = a1;
+  Register array_pointer = a5;
+
+  // Does the native context match?
+  __ SmiScale(at, index, kPointerSizeLog2);
+  __ Daddu(array_pointer, map, Operand(at));
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousContext));
+  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+  // OSR id set to none?
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+  // Literals available?
+  __ ld(temp, FieldMemOperand(array_pointer,
+                              SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ ld(a4, MemOperand(sp, 0));
+  __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
+  __ push(index);
+  __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(index);
+
+  // Code available?
+  Register entry = a4;
+  __ ld(entry,
+        FieldMemOperand(array_pointer,
+                        SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, a5);
+
+  // Link the closure into the optimized function list.
+  // a4 : code entry
+  // a3 : native context
+  // a1 : closure
+  __ ld(a5,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ sd(closure,
+        ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mov(a5, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+                            kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ mov(closure, a5);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ Jump(entry);
+
+  __ bind(&loop_bottom);
+  __ Dsubu(index, index,
+           Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility. Check the context free optimized code map entry.
+  __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+                                        SharedFunctionInfo::kSharedCodeIndex));
+  __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid?
+  __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ And(a5, a5, Operand(Code::KindField::kMask));
+  __ dsrl(a5, a5, Code::KindField::kShift);
+  __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
+  // Yes, install the full code.
+  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+  __ RecordWriteCodeEntryField(closure, entry, a5);
+  __ Jump(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
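The loop above searches the SharedFunctionInfo's optimized code map backwards for an entry matching the current native context with no OSR id; literals are installed as soon as they are found, and cached code (or, failing that, the context-free shared entry or valid full code) is tail-called. Roughly, as a hedged C++ sketch (the entry layout mirrors the kOffsetToPrevious* constants; weak-cell clearing is modeled as null):

    struct CodeMapEntry {
      void* context;    // weak: null once cleared
      int osr_ast_id;
      void* literals;   // weak
      void* code;       // weak
    };

    // Returns the matching entry, or nullptr (-> gotta_call_runtime).
    CodeMapEntry* FindCachedEntry(CodeMapEntry* entries, int count,
                                  void* native_context, int bailout_id_none) {
      for (int i = count - 1; i >= 0; --i) {            // loop_top..loop_bottom
        CodeMapEntry* e = &entries[i];
        if (e->context != native_context) continue;     // context match?
        if (e->osr_ast_id != bailout_id_none) continue; // OSR id set to none?
        return e->literals != nullptr ? e : nullptr;    // literals available?
      }
      return nullptr;  // found neither literals nor code
    }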
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
                                  Runtime::kCompileOptimized_NotConcurrent);
 }
 
-
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
@@ -1252,15 +1474,19 @@
   __ SmiUntag(a6);
   // Switch on the state.
   Label with_tos_register, unknown_state;
-  __ Branch(&with_tos_register,
-            ne, a6, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Branch(
+      &with_tos_register, ne, a6,
+      Operand(static_cast<int64_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
   __ Ret(USE_DELAY_SLOT);
   // Safe to fill delay slot; Daddu will emit one instruction.
   __ Daddu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
 
   __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
   __ ld(v0, MemOperand(sp, 1 * kPointerSize));
-  __ Branch(&unknown_state, ne, a6, Operand(FullCodeGenerator::TOS_REG));
+  __ Branch(
+      &unknown_state, ne, a6,
+      Operand(static_cast<int64_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
 
   __ Ret(USE_DELAY_SLOT);
   // Safe to fill delay slot; Daddu will emit one instruction.
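The two branches above correspond to the new Deoptimizer::BailoutState protocol: NO_REGISTERS just drops the pushed state word, while TOS_REGISTER also reloads the saved top-of-stack value into v0, which the DCHECK_EQ confirms is the interpreter accumulator. In outline (enumerators as in the diff; the stack handling is schematic):

    #include <cstdint>

    enum class BailoutState { NO_REGISTERS, TOS_REGISTER };

    void NotifyDeoptimizedEpilogue(BailoutState state, intptr_t* sp,
                                   intptr_t* v0_accumulator) {
      if (state == BailoutState::TOS_REGISTER) {
        *v0_accumulator = sp[1];  // ld(v0, MemOperand(sp, 1 * kPointerSize))
      }
      // The stub then pops the state word (and the TOS slot, if present).
    }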
@@ -1467,28 +1693,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : argc
-  //  -- sp[0] : first argument (left-hand side)
-  //  -- sp[8] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ ld(InstanceOfDescriptor::LeftRegister(),
-          MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
-    __ ld(InstanceOfDescriptor::RightRegister(),
-          MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ DropAndRet(2);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0    : argc
@@ -2457,6 +2661,30 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Push(a0);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : requested object size (untagged)
+  //  -- ra : return address
+  // -----------------------------------
+  __ SmiTag(a0);
+  __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(a0, a1);
+  __ Move(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
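Both builtins hand the request over to the runtime with a Smi-tagged size (the old-space variant also passes the encoded target space) and clear cp, since no context is available here. On 64-bit V8, Smi tagging shifts the 32-bit value into the upper word; as a sketch (not V8's Smi class):

    #include <cstdint>

    // 64-bit smis: the value occupies the upper 32 bits, the low bits are
    // the (zero) tag -- this is what __ SmiTag(a0) emits.
    inline int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }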
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index fdb6c81..5702c78 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -53,12 +53,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -71,11 +65,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1461,128 +1450,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = a1;              // Object (lhs).
-  Register const function = a0;            // Function (rhs).
-  Register const object_map = a2;          // Map of {object}.
-  Register const function_map = a3;        // Map of {function}.
-  Register const function_prototype = a4;  // Prototype of {function}.
-  Register const scratch = a5;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
-  __ Branch(&fast_case, ne, function, Operand(at));
-  __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
-  __ Branch(&fast_case, ne, object_map, Operand(at));
-  __ Ret(USE_DELAY_SLOT);
-  __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ GetObjectType(function, function_map, scratch);
-  __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
-  __ Ret(USE_DELAY_SLOT);
-  __ LoadRoot(v0, Heap::kFalseValueRootIndex);  // In delay slot.
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ GetObjectType(function, function_map, scratch);
-  __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
-  // Go to the runtime if the function is not a constructor.
-  __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
-  __ And(at, scratch, Operand(1 << Map::kIsConstructor));
-  __ Branch(&slow_case, eq, at, Operand(zero_reg));
-
-  // Ensure that {function} has an instance prototype.
-  __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ ld(function_prototype,
-        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  __ GetObjectType(function_prototype, scratch, scratch);
-  __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
-  __ ld(function_prototype,
-        FieldMemOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Register const object_instance_type = function_map;
-  Register const map_bit_field = function_map;
-  Register const null = scratch;
-  Register const result = v0;
-
-  Label done, loop, fast_runtime_fallback;
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ LoadRoot(null, Heap::kNullValueRootIndex);
-  __ bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
-  __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
-  // Check if the current object is a Proxy.
-  __ lbu(object_instance_type,
-         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
-  __ Branch(&fast_runtime_fallback, eq, object_instance_type,
-            Operand(JS_PROXY_TYPE));
-
-  __ ld(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ Branch(&done, eq, object, Operand(function_prototype));
-  __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
-  __ ld(object_map,
-        FieldMemOperand(object, HeapObject::kMapOffset));  // In delay slot.
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-  __ Ret(USE_DELAY_SLOT);
-  __ StoreRoot(result,
-               Heap::kInstanceofCacheAnswerRootIndex);  // In delay slot.
-
-  // Found Proxy or access check needed: Call the runtime
-  __ bind(&fast_runtime_fallback);
-  __ Push(object, function_prototype);
-  // Invalidate the instanceof cache.
-  DCHECK(Smi::FromInt(0) == 0);
-  __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ Push(object, function);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
@@ -4046,8 +3913,8 @@
   __ bind(&not_array);
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&miss, ne, feedback, Operand(at));
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, feedback,
                                                receiver_map, scratch1, a7);
@@ -4189,8 +4056,8 @@
 
   __ bind(&not_array);
   __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
       scratch1, scratch2);
@@ -4758,15 +4625,15 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ sd(a2, MemOperand(v0, JSObject::kMapOffset));
+  __ sd(a2, FieldMemOperand(v0, JSObject::kMapOffset));
   __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
-  __ sd(a3, MemOperand(v0, JSObject::kPropertiesOffset));
-  __ sd(a3, MemOperand(v0, JSObject::kElementsOffset));
+  __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ Daddu(a1, v0, Operand(JSObject::kHeaderSize));
+  __ Daddu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
 
   // ----------- S t a t e -------------
-  //  -- v0 : result (untagged)
+  //  -- v0 : result (tagged)
   //  -- a1 : result fields (untagged)
   //  -- a5 : result end (untagged)
   //  -- a2 : initial map
@@ -4784,11 +4651,7 @@
   {
     // Initialize all in-object fields with undefined.
     __ InitializeFieldsWithFiller(a1, a5, a0);
-
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ Ret(USE_DELAY_SLOT);
-    __ Daddu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+    __ Ret();
   }
   __ bind(&slack_tracking);
   {
@@ -4811,9 +4674,7 @@
     Label finalize;
     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
     __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
-    __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ Daddu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+    __ Branch(&finalize, eq, a3, Operand(zero_reg));
     __ Ret();
 
     // Finalize the instance size.
@@ -4839,10 +4700,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(a2);
   }
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ Dsubu(v0, v0, Operand(kHeapObjectTag));
   __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
   __ Dlsa(a5, v0, a5, kPointerSizeLog2);
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ Dsubu(a5, a5, Operand(kHeapObjectTag));
   __ jmp(&done_allocate);
 
   // Fall back to %NewObject.
@@ -4861,19 +4722,19 @@
   // -----------------------------------
   __ AssertFunction(a1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make a2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ Branch(USE_DELAY_SLOT, &loop_entry);
-    __ mov(a2, fp);  // In delay slot.
-    __ bind(&loop);
+  // Make a2 point to the JavaScript frame.
+  __ mov(a2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&loop, ne, a1, Operand(a3));
+    __ Branch(&ok, eq, a1, Operand(a3));
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have rest parameters (only possible if we have an
@@ -4905,7 +4766,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the rest parameter array in v0.
@@ -4947,7 +4808,7 @@
     Label allocate, done_allocate;
     __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
     __ Dlsa(a1, a1, a0, kPointerSizeLog2);
-    __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+    __ Allocate(a1, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Compute arguments.length in a4.
@@ -5007,24 +4868,40 @@
   // -----------------------------------
   __ AssertFunction(a1);
 
+  // Make t0 point to the JavaScript frame.
+  __ mov(t0, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ ld(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ ld(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
+    __ Branch(&ok, eq, a1, Operand(a3));
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ lw(a2,
          FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Lsa(a3, fp, a2, kPointerSizeLog2);
+  __ Lsa(a3, t0, a2, kPointerSizeLog2);
   __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
   __ SmiTag(a2);
 
   // a1 : function
   // a2 : number of parameters (tagged)
   // a3 : parameters pointer
+  // t0 : JavaScript frame pointer
   // Registers used over whole function:
   //  a5 : arguments count (tagged)
   //  a6 : mapped parameter count (tagged)
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a4, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
   __ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&adaptor_frame, eq, a0,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5072,7 +4949,7 @@
   __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
+  __ Allocate(t1, v0, t1, a4, &runtime, NO_ALLOCATION_FLAGS);
 
   // v0 = address of new object(s) (tagged)
   // a2 = argument count (smi-tagged)
@@ -5227,19 +5104,19 @@
   // -----------------------------------
   __ AssertFunction(a1);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make a2 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ Branch(USE_DELAY_SLOT, &loop_entry);
-    __ mov(a2, fp);  // In delay slot.
-    __ bind(&loop);
+  // Make a2 point to the JavaScript frame.
+  __ mov(a2, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&loop, ne, a1, Operand(a3));
+    __ Branch(&ok, eq, a1, Operand(a3));
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -5278,7 +5155,7 @@
   Label allocate, done_allocate;
   __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
   __ Dlsa(a1, a1, a0, kPointerSizeLog2);
-  __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+  __ Allocate(a1, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Compute arguments.length in a4.
@@ -5636,7 +5513,11 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
+
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
 
   // Save context, callee and call data.
   __ Push(context, callee, call_data);
@@ -5660,7 +5541,7 @@
 
   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
-  const int kApiStackSpace = 4;
+  const int kApiStackSpace = 3;
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
@@ -5680,8 +5561,6 @@
   // justified by n64 ABI.
   __ li(at, Operand(argc()));
   __ sw(at, MemOperand(a0, 2 * kPointerSize));
-  // FunctionCallbackInfo::is_construct_call_ = 0
-  __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5698,8 +5577,9 @@
   }
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  int32_t stack_space_offset = 4 * kPointerSize;
+  int32_t stack_space_offset = 3 * kPointerSize;
   stack_space = argc() + FCA::kArgsLength + 1;
+  // TODO(adamk): Why are we clobbering this immediately?
   stack_space_offset = kInvalidStackOffset;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                            stack_space_offset, return_value_operand,
@@ -5708,15 +5588,44 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- sp[0]                        : name
-  //  -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- a2                           : api_function_address
-  // -----------------------------------
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
 
-  Register api_function_address = ApiGetterDescriptor::function_address();
-  DCHECK(api_function_address.is(a2));
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = a4;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+  Register api_function_address = a2;
+
+  // Here and below, +1 is for name(), which is pushed after the args_ array.
+  typedef PropertyCallbackArguments PCA;
+  __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+  __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+  __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+                                    kPointerSize));
+  __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
+  __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+  __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+  // should_throw_on_error -> false
+  DCHECK(Smi::FromInt(0) == nullptr);
+  __ sd(zero_reg,
+        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ sd(scratch, MemOperand(sp, 0 * kPointerSize));
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5738,6 +5647,10 @@
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+  __ ld(api_function_address,
+        FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
   // +3 is to skip prolog, return address and name handle.
   MemOperand return_value_operand(
       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5746,7 +5659,6 @@
                            return_value_operand, NULL);
 }
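CallApiGetterStub now builds the v8::PropertyCallbackInfo::args_ array itself rather than taking a precomputed function address; the STATIC_ASSERTs pin the slot order, and the property name sits one slot below the array. A hypothetical struct mirroring the frame (field names are descriptive, not V8's):

    // Stack picture the stub creates, lowest address first:
    struct ApiGetterFrame {
      void* name;                   // sp[0], pushed below args_
      void* should_throw_on_error;  // args_[0], Smi 0 == false
      void* holder;                 // args_[1]
      void* isolate;                // args_[2]
      void* return_value_default;   // args_[3], undefined
      void* return_value;           // args_[4], undefined
      void* data;                   // args_[5], callback->data
      void* receiver;               // args_[6] (kThisIndex)
    };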
 
-
 #undef __
 
 }  // namespace internal
diff --git a/src/mips64/codegen-mips64.cc b/src/mips64/codegen-mips64.cc
index 44d822b..678f606 100644
--- a/src/mips64/codegen-mips64.cc
+++ b/src/mips64/codegen-mips64.cc
@@ -732,6 +732,7 @@
   __ SmiScale(scratch, length, kDoubleSizeLog2);
   __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
   __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  __ Dsubu(array, array, kHeapObjectTag);
   // array: destination FixedDoubleArray, not tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
@@ -882,6 +883,7 @@
   __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
   __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
               NO_ALLOCATION_FLAGS);
+  __ Dsubu(array, array, kHeapObjectTag);
   // array: destination FixedArray, not tagged as heap object
   // Set destination FixedArray's length and map.
   __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
diff --git a/src/mips64/constants-mips64.h b/src/mips64/constants-mips64.h
index 57e947b..8272420 100644
--- a/src/mips64/constants-mips64.h
+++ b/src/mips64/constants-mips64.h
@@ -60,6 +60,27 @@
 const bool IsMipsSoftFloatABI = true;
 #endif
 
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kMipsLwrOffset = 0;
+const uint32_t kMipsLwlOffset = 3;
+const uint32_t kMipsSwrOffset = 0;
+const uint32_t kMipsSwlOffset = 3;
+const uint32_t kMipsLdrOffset = 0;
+const uint32_t kMipsLdlOffset = 7;
+const uint32_t kMipsSdrOffset = 0;
+const uint32_t kMipsSdlOffset = 7;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kMipsLwrOffset = 3;
+const uint32_t kMipsLwlOffset = 0;
+const uint32_t kMipsSwrOffset = 3;
+const uint32_t kMipsSwlOffset = 0;
+const uint32_t kMipsLdrOffset = 7;
+const uint32_t kMipsLdlOffset = 0;
+const uint32_t kMipsSdrOffset = 7;
+const uint32_t kMipsSdlOffset = 0;
+#else
+#error Unknown endianness
+#endif
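These constants parameterize the lwr/lwl, swr/swl, ldr/ldl and sdr/sdl pairs used by the unaligned-access helpers below: each pair touches the "right" and "left" fragments of a word straddling an alignment boundary, and which byte offset is which flips with endianness. The net effect is a plain unaligned access, as in this C++ sketch:

    #include <cstdint>
    #include <cstring>

    // What an lwr/lwl pair at kMipsLwrOffset/kMipsLwlOffset implements:
    inline uint32_t UnalignedLoad32(const uint8_t* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // byte order resolved by the offsets
      return v;
    }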
 
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
@@ -384,6 +405,7 @@
   MOVZ = ((1U << 3) + 2),
   MOVN = ((1U << 3) + 3),
   BREAK = ((1U << 3) + 5),
+  SYNC = ((1U << 3) + 7),
 
   MFHI = ((2U << 3) + 0),
   CLZ_R6 = ((2U << 3) + 0),
@@ -645,7 +667,6 @@
   NULLSF = 0U
 };
 
-
 // ----- Emulated conditions.
 // On MIPS we use this enum to abstract from conditional branch instructions.
 // The 'U' prefix is used to specify unsigned comparisons.
@@ -911,7 +932,6 @@
 
   enum TypeChecks { NORMAL, EXTRA };
 
-
   static constexpr uint64_t kOpcodeImmediateTypeMask =
       OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
       OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
@@ -926,12 +946,14 @@
       OpcodeToBitNumber(POP76) | OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) |
       OpcodeToBitNumber(LWL) | OpcodeToBitNumber(LW) | OpcodeToBitNumber(LWU) |
       OpcodeToBitNumber(LD) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) |
-      OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
+      OpcodeToBitNumber(LDL) | OpcodeToBitNumber(LDR) | OpcodeToBitNumber(LWR) |
+      OpcodeToBitNumber(SDL) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
       OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SD) |
-      OpcodeToBitNumber(SWR) | OpcodeToBitNumber(LWC1) |
-      OpcodeToBitNumber(LDC1) | OpcodeToBitNumber(SWC1) |
-      OpcodeToBitNumber(SDC1) | OpcodeToBitNumber(PCREL) |
-      OpcodeToBitNumber(DAUI) | OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
+      OpcodeToBitNumber(SWR) | OpcodeToBitNumber(SDR) |
+      OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) |
+      OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) |
+      OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(DAUI) |
+      OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
 
 #define FunctionFieldToBitNumber(function) (1ULL << function)
 
@@ -964,8 +986,7 @@
       FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) |
       FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) |
       FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
-      FunctionFieldToBitNumber(SELNEZ_S);
-
+      FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
 
   // Get the encoding type of the instruction.
   inline Type InstructionType(TypeChecks checks = NORMAL) const;
diff --git a/src/mips64/disasm-mips64.cc b/src/mips64/disasm-mips64.cc
index 3d0e10c..7b05493 100644
--- a/src/mips64/disasm-mips64.cc
+++ b/src/mips64/disasm-mips64.cc
@@ -375,7 +375,8 @@
   uint64_t target = static_cast<uint64_t>(instr->Imm26Value())
                     << kImmFieldShift;
   target = (reinterpret_cast<uint64_t>(instr) & ~0xfffffff) | target;
-  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%lx", target);
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "0x%" PRIx64, target);
 }
 
 
@@ -801,16 +802,14 @@
   if (instr->Bits(25, 6) == static_cast<int>(kMaxStopCode)) {
     // This is stop(msg).
     Format(instr, "break, code: 'code");
-    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                "\n%p       %08lx       stop msg: %s",
-                                static_cast<void*>
-                                      (reinterpret_cast<int32_t*>(instr
-                                              + Instruction::kInstrSize)),
-                                reinterpret_cast<uint64_t>
-                                (*reinterpret_cast<char**>(instr
-                                              + Instruction::kInstrSize)),
-                                *reinterpret_cast<char**>(instr
-                                              + Instruction::kInstrSize));
+    out_buffer_pos_ += SNPrintF(
+        out_buffer_ + out_buffer_pos_,
+        "\n%p       %08" PRIx64 "       stop msg: %s",
+        static_cast<void*>(
+            reinterpret_cast<int32_t*>(instr + Instruction::kInstrSize)),
+        reinterpret_cast<uint64_t>(
+            *reinterpret_cast<char**>(instr + Instruction::kInstrSize)),
+        *reinterpret_cast<char**>(instr + Instruction::kInstrSize));
     // Size 3: the break_ instr, plus embedded 64-bit char pointer.
     return 3 * Instruction::kInstrSize;
   } else {
@@ -1162,26 +1161,22 @@
       if (instr->RsValue() == 0) {
         Format(instr, "srl     'rd, 'rt, 'sa");
       } else {
-        if (kArchVariant == kMips64r2) {
-          Format(instr, "rotr    'rd, 'rt, 'sa");
-        } else {
-          Unknown(instr);
-        }
+        Format(instr, "rotr    'rd, 'rt, 'sa");
       }
       break;
     case DSRL:
       if (instr->RsValue() == 0) {
         Format(instr, "dsrl    'rd, 'rt, 'sa");
       } else {
-        if (kArchVariant == kMips64r2) {
-          Format(instr, "drotr   'rd, 'rt, 'sa");
-        } else {
-          Unknown(instr);
-        }
+        Format(instr, "drotr   'rd, 'rt, 'sa");
       }
       break;
     case DSRL32:
-      Format(instr, "dsrl32  'rd, 'rt, 'sa");
+      if (instr->RsValue() == 0) {
+        Format(instr, "dsrl32  'rd, 'rt, 'sa");
+      } else {
+        Format(instr, "drotr32 'rd, 'rt, 'sa");
+      }
       break;
     case SRA:
       Format(instr, "sra     'rd, 'rt, 'sa");
@@ -1202,22 +1197,14 @@
       if (instr->SaValue() == 0) {
         Format(instr, "srlv    'rd, 'rt, 'rs");
       } else {
-        if (kArchVariant == kMips64r2) {
-          Format(instr, "rotrv   'rd, 'rt, 'rs");
-        } else {
-          Unknown(instr);
-        }
+        Format(instr, "rotrv   'rd, 'rt, 'rs");
       }
       break;
     case DSRLV:
       if (instr->SaValue() == 0) {
         Format(instr, "dsrlv   'rd, 'rt, 'rs");
       } else {
-        if (kArchVariant == kMips64r2) {
-          Format(instr, "drotrv  'rd, 'rt, 'rs");
-        } else {
-          Unknown(instr);
-        }
+        Format(instr, "drotrv  'rd, 'rt, 'rs");
       }
       break;
     case SRAV:
@@ -1400,6 +1387,9 @@
     case TNE:
       Format(instr, "tne     'rs, 'rt, code: 'code");
       break;
+    case SYNC:
+      Format(instr, "sync");
+      break;
     case MOVZ:
       Format(instr, "movz    'rd, 'rs, 'rt");
       break;
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
index 7695d0b..67bae36 100644
--- a/src/mips64/interface-descriptors-mips64.cc
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -46,16 +46,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return a1; }
-const Register InstanceOfDescriptor::RightRegister() { return a0; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return a1; }
 const Register StringCompareDescriptor::RightRegister() { return a0; }
 
-
-const Register ApiGetterDescriptor::function_address() { return a2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return a0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
 
 const Register MathPowTaggedDescriptor::exponent() { return a2; }
 
@@ -68,6 +63,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return a0; }
+const Register HasPropertyDescriptor::KeyRegister() { return a3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -247,13 +244,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // register state
+  // a0 -- number of arguments
+  // a1 -- function
+  // a2 -- allocation site with elements kind
+  Register registers[] = {a1, a2, a0};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -316,6 +316,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -376,9 +381,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
-      kInterpreterDispatchTableRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -413,6 +417,16 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      v0,  // the value to pass to the generator
+      a1,  // the JSGeneratorObject to resume
+      a2   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index fb83fe9..b7b4f28 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -1325,33 +1325,175 @@
 // ------------Pseudo-instructions-------------
 
 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
-  lwr(rd, rs);
-  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lw(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsLwrOffset) &&
+        is_int16(rs.offset() + kMipsLwlOffset)) {
+      if (!rd.is(rs.rm())) {
+        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+      } else {
+        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      lwr(rd, MemOperand(at, kMipsLwrOffset));
+      lwl(rd, MemOperand(at, kMipsLwlOffset));
+    }
+  }
+}
+
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
+  if (kArchVariant == kMips64r6) {
+    lwu(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Ulw(rd, rs);
+    Dext(rd, rd, 0, 32);
+  }
 }
 
 
 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
-  swr(rd, rs);
-  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    sw(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsSwrOffset) &&
+        is_int16(rs.offset() + kMipsSwlOffset)) {
+      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      swr(rd, MemOperand(at, kMipsSwrOffset));
+      swl(rd, MemOperand(at, kMipsSwlOffset));
+    }
+  }
 }
 
-
-// Do 64-bit load from unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
-  // Assert fail if the offset from start of object IS actually aligned.
-  // ONLY use with known misalignment, since there is performance cost.
-  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  if (kArchEndian == kLittle) {
-    lwu(rd, rs);
-    lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsll32(scratch, scratch, 0);
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lh(rd, rs);
   } else {
-    lw(rd, rs);
-    lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsll32(rd, rd, 0);
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lb(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lb(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lb(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    dsll(rd, rd, 8);
+    or_(rd, rd, at);
   }
-  Daddu(rd, rd, scratch);
+}
+
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    lhu(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lbu(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    dsll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
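Pre-r6 cores have no unaligned halfword instruction pair, so Ulh/Ulhu assemble the result from two byte loads plus a shift and an or; the #ifdefs only pick which byte is the high one. The little-endian unsigned case, in C++:

    #include <cstdint>

    inline uint16_t UnalignedLoad16LE(const uint8_t* p) {
      // lbu low byte, lbu high byte, dsll 8, or_ -- as in Ulhu above.
      return static_cast<uint16_t>(p[0] | (p[1] << 8));
    }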
+
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  DCHECK(!rs.rm().is(scratch));
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    sh(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    MemOperand source = rs;
+    // If offset > 16 bits, load address to at with offset 0.
+    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+      LoadRegPlusOffsetToAt(rs);
+      source = MemOperand(at, 0);
+    }
+
+    if (!scratch.is(rd)) {
+      mov(scratch, rd);
+    }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+    sb(scratch, source);
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, source);
+#endif
+  }
+}
+
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    ld(rd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsLdrOffset) &&
+        is_int16(rs.offset() + kMipsLdlOffset)) {
+      if (!rd.is(rs.rm())) {
+        ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+        ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+      } else {
+        ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+        ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      ldr(rd, MemOperand(at, kMipsLdrOffset));
+      ldl(rd, MemOperand(at, kMipsLdlOffset));
+    }
+  }
 }
 
 
@@ -1366,21 +1508,22 @@
   Daddu(rd, rd, scratch);
 }
 
-
-// Do 64-bit store to unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
-  // Assert fail if the offset from start of object IS actually aligned.
-  // ONLY use with known misalignment, since there is performance cost.
-  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
-  if (kArchEndian == kLittle) {
-    sw(rd, rs);
-    dsrl32(scratch, rd, 0);
-    sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (kArchVariant == kMips64r6) {
+    sd(rd, rs);
   } else {
-    sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-    dsrl32(scratch, rd, 0);
-    sw(scratch, rs);
+    DCHECK(kArchVariant == kMips64r2);
+    if (is_int16(rs.offset() + kMipsSdrOffset) &&
+        is_int16(rs.offset() + kMipsSdlOffset)) {
+      sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
+      sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      sdr(rd, MemOperand(at, kMipsSdrOffset));
+      sdl(rd, MemOperand(at, kMipsSdlOffset));
+    }
   }
 }
 
@@ -1393,6 +1536,51 @@
   sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
 }
 
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (kArchVariant == kMips64r6) {
+    lwc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Ulw(scratch, rs);
+    mtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (kArchVariant == kMips64r6) {
+    swc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    mfc1(scratch, fd);
+    Usw(scratch, rs);
+  }
+}
+
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    ldc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    Uld(scratch, rs);
+    dmtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (kArchVariant == kMips64r6) {
+    sdc1(fd, rs);
+  } else {
+    DCHECK(kArchVariant == kMips64r2);
+    dmfc1(scratch, fd);
+    Usd(scratch, rs);
+  }
+}
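The FPU variants simply route through the integer helpers: an unaligned double load is a Uld into the scratch GPR followed by dmtc1, and stores mirror that with dmfc1 plus Usd. In effect:

    #include <cstdint>
    #include <cstring>

    inline double UnalignedLoadDouble(const uint8_t* p) {
      uint64_t bits;
      std::memcpy(&bits, p, sizeof(bits));  // Uld into the scratch GPR
      double d;
      std::memcpy(&d, &bits, sizeof(d));    // dmtc1: raw bits into an FPU reg
      return d;
    }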
 
 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   AllowDeferredHandleDereference smi_check;
@@ -4142,12 +4330,14 @@
   // to calculate the new top.
   Daddu(result_end, result, Operand(object_size));
   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
-  sd(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    Daddu(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    sd(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  Daddu(result, result, Operand(kHeapObjectTag));
 }
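With allocation folding, the dominating allocation performs the limit check for the whole group but deliberately does not publish a new top; a later non-dominator write-back commits it, and tagging is now unconditional. A rough sketch of the invariant (simplified bump-pointer heap; an assumption, not V8 code):

    #include <cstddef>
    #include <cstdint>

    struct Heap { uintptr_t top, limit; };

    // Returns a tagged pointer, or 0 to signal gc_required.
    uintptr_t Allocate(Heap* h, size_t size, bool folding_dominator) {
      uintptr_t result = h->top;
      uintptr_t new_top = result + size;
      if (new_top > h->limit) return 0;          // branch to gc_required
      if (!folding_dominator) h->top = new_top;  // dominators keep old top
      return result + 1;                         // kHeapObjectTag == 1
    }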
 
 
@@ -4217,6 +4407,7 @@
   } else {
     Daddu(result_end, result, Operand(object_size));
   }
+
   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
 
   // Update allocation top. result temporarily holds the new top.
@@ -4224,14 +4415,91 @@
     And(at, result_end, Operand(kObjectAlignmentMask));
     Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
   }
-  sd(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    Daddu(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    sd(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  Daddu(result, result, Operand(kHeapObjectTag));
 }
 
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register scratch1, Register scratch2,
+                                  AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(!AreAliased(result, scratch1, scratch2, at));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK(0 == (object_size & kObjectAlignmentMask));
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  Register top_address = scratch1;
+  Register result_end = scratch2;
+  li(top_address, Operand(allocation_top));
+  ld(result, MemOperand(top_address));
+
+  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+  // the same alignment on MIPS64.
+  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+  if (emit_debug_code()) {
+    And(at, result, Operand(kDoubleAlignmentMask));
+    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+  }
+
+  // Calculate new top and write it back.
+  Daddu(result_end, result, Operand(object_size));
+  sd(result_end, MemOperand(top_address));
+
+  Daddu(result, result, Operand(kHeapObjectTag));
+}
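FastAllocate differs from Allocate in that it performs no limit check at all: callers use it only when the space is already known to be reserved, so it just bumps the top and tags the result. Schematically:

    #include <cstddef>
    #include <cstdint>

    // No gc_required path -- space must already be reserved.
    uintptr_t FastAllocate(uintptr_t* top_address, size_t object_size) {
      uintptr_t result = *top_address;
      *top_address = result + object_size;  // write the new top back
      return result + 1;                    // kHeapObjectTag
    }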
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, Register scratch,
+                                  AllocationFlags flags) {
+  // |object_size| and |result_end| may overlap; the other registers must not.
+  DCHECK(!AreAliased(object_size, result, scratch, at));
+  DCHECK(!AreAliased(result_end, result, scratch, at));
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address and object size registers.
+  Register top_address = scratch;
+  li(top_address, Operand(allocation_top));
+  ld(result, MemOperand(top_address));
+
+  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+  // the same alignment on MIPS64.
+  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+  if (emit_debug_code()) {
+    And(at, result, Operand(kDoubleAlignmentMask));
+    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+  }
+
+  // Calculate new top and write it back
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    Dlsa(result_end, result, object_size, kPointerSizeLog2);
+  } else {
+    Daddu(result_end, result, Operand(object_size));
+  }
+
+  // Update allocation top. result_end temporarily holds the new top.
+  if (emit_debug_code()) {
+    And(at, result_end, Operand(kObjectAlignmentMask));
+    Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
+  }
+  sd(result_end, MemOperand(top_address));
+
+  Daddu(result, result, Operand(kHeapObjectTag));
+}
 
 void MacroAssembler::AllocateTwoByteString(Register result,
                                            Register length,
@@ -4248,12 +4516,8 @@
   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate two-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result,
@@ -4277,12 +4541,8 @@
   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate one-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -4296,7 +4556,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
   InitializeNewString(result,
                       length,
                       Heap::kConsStringMapRootIndex,
@@ -4309,12 +4569,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -4327,7 +4583,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result,
                       length,
@@ -4343,7 +4599,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -4369,12 +4625,11 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* need_gc,
-                                        TaggingMode tagging_mode,
                                         MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
-           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+           NO_ALLOCATION_FLAGS);
 
   Heap::RootListIndex map_index = mode == MUTABLE
       ? Heap::kMutableHeapNumberMapRootIndex
@@ -4382,11 +4637,7 @@
   AssertIsRoot(heap_number_map, map_index);
 
   // Store heap number map in the allocated object.
-  if (tagging_mode == TAG_RESULT) {
-    sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-  } else {
-    sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
-  }
+  sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
 }
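
Editor's note: with the untagged path removed, the map is always stored through FieldMemOperand. As a sketch of what that operand reduces to (assuming kHeapObjectTag == 1, as elsewhere in V8):

// Sketch only: a field address on a tagged pointer is the raw address plus
// the field offset, minus the tag bit folded in by FieldMemOperand.
inline uintptr_t FieldAddressSketch(uintptr_t tagged_object, int offset) {
  return tagged_object + offset - 1 /* kHeapObjectTag */;
}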
 
 
@@ -4410,7 +4661,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -6225,6 +6477,16 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    SmiTst(object, t8);
+    Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+    GetObjectType(object, t8, t8);
+    Check(eq, kOperandIsNotAGeneratorObject, t8,
+          Operand(JS_GENERATOR_OBJECT_TYPE));
+  }
+}
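
Editor's note: the two checks above first rule out Smis, then compare the instance type. Since kSmiTag == 0 (per the STATIC_ASSERT), the Smi test is a one-bit mask, roughly:

// Sketch: a Smi has its low tag bit clear, so the AND distinguishes Smis
// from heap objects before the map is dereferenced.
inline bool IsSmiSketch(uintptr_t value) { return (value & 1) == 0; }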
 
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
@@ -6696,7 +6958,7 @@
                                                      Label* no_memento_found) {
   Label map_check;
   Label top_check;
-  ExternalReference new_space_allocation_top =
+  ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -6705,14 +6967,16 @@
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  li(at, Operand(new_space_allocation_top_adr));
+  ld(at, MemOperand(at));
+  Xor(scratch_reg, scratch_reg, Operand(at));
   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
   Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
   // The object is on a different page than allocation top. Bail out if the
   // object sits on the page boundary as no memento can follow and we cannot
   // touch the memory following it.
-  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
   Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
   Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
@@ -6721,13 +6985,13 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  li(at, Operand(new_space_allocation_top));
-  lw(at, MemOperand(at));
+  Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  li(at, Operand(new_space_allocation_top_adr));
+  ld(at, MemOperand(at));
   Branch(no_memento_found, gt, scratch_reg, Operand(at));
   // Memento map check.
   bind(&map_check);
-  lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+  ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
   Branch(no_memento_found, ne, scratch_reg,
          Operand(isolate()->factory()->allocation_memento_map()));
 }
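
Editor's note: the memento check above leans twice on an XOR-and-mask trick; two addresses lie on the same page exactly when their bits above the page-alignment mask agree. A hedged one-liner equivalent:

// Sketch of the page test used above: the XOR cancels the high (page) bits
// iff both addresses share the page.
inline bool OnSamePage(uintptr_t a, uintptr_t b, uintptr_t page_align_mask) {
  return ((a ^ b) & ~page_align_mask) == 0;
}
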
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index 401112d..27a34b3 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -18,8 +18,8 @@
 const Register kReturnRegister2 = {Register::kCode_a0};
 const Register kJSFunctionRegister = {Register::kCode_a1};
 const Register kContextRegister = {Register::kCpRegister};
+const Register kAllocateSizeRegister = {Register::kCode_a0};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_a7};
 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
 const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
@@ -587,6 +587,15 @@
   void Allocate(Register object_size, Register result, Register result_end,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
+  // FastAllocate is currently only used for folded allocations. It just
+  // increments the top pointer without checking against the limit. This can
+  // only be done if it was proved earlier that the allocation will succeed.
+  void FastAllocate(int object_size, Register result, Register scratch1,
+                    Register scratch2, AllocationFlags flags);
+
+  void FastAllocate(Register object_size, Register result, Register result_new,
+                    Register scratch, AllocationFlags flags);
+
   void AllocateTwoByteString(Register result,
                              Register length,
                              Register scratch1,
@@ -621,7 +630,6 @@
                           Register scratch2,
                           Register heap_number_map,
                           Label* gc_required,
-                          TaggingMode tagging_mode = TAG_RESULT,
                           MutableMode mode = IMMUTABLE);
 
   void AllocateHeapNumberWithValue(Register result,
@@ -716,10 +724,22 @@
 
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
+  void Ulh(Register rd, const MemOperand& rs);
+  void Ulhu(Register rd, const MemOperand& rs);
+  void Ush(Register rd, const MemOperand& rs, Register scratch);
+
   void Ulw(Register rd, const MemOperand& rs);
+  void Ulwu(Register rd, const MemOperand& rs);
   void Usw(Register rd, const MemOperand& rs);
-  void Uld(Register rd, const MemOperand& rs, Register scratch = at);
-  void Usd(Register rd, const MemOperand& rs, Register scratch = at);
+
+  void Uld(Register rd, const MemOperand& rs);
+  void Usd(Register rd, const MemOperand& rs);
+
+  void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
+  void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+  void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
+  void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
 
   void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
   void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
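
Editor's note: the Ul*/Us* helpers declared above give the macro assembler explicit unaligned loads and stores (emitted as left/right instruction pairs on pre-r6 cores). As a hedged C-level analogue of the guarantee they provide:

// Sketch only: a safe unaligned 32-bit load via memcpy, which is what Ulw
// offers at the instruction level on targets without hardware support.
#include <cstdint>
#include <cstring>

inline uint32_t UnalignedLoad32(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof v);
  return v;
}
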
@@ -1714,6 +1734,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
index 9519865..3157030 100644
--- a/src/mips64/simulator-mips64.cc
+++ b/src/mips64/simulator-mips64.cc
@@ -268,33 +268,42 @@
 
   PrintF("\n");
   // at, v0, a0.
-  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+  PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64
+         "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n",
          REG_INFO(1), REG_INFO(2), REG_INFO(4));
   // v1, a1.
-  PrintF("%34s\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+  PrintF("%34s\t%3s: 0x%016" PRIx64 "  %14" PRId64 " \t%3s: 0x%016" PRIx64
+         "  %14" PRId64 " \n",
          "", REG_INFO(3), REG_INFO(5));
   // a2.
-  PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(6));
+  PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 "  %14" PRId64 " \n", "", "",
+         REG_INFO(6));
   // a3.
-  PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(7));
+  PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 "  %14" PRId64 " \n", "", "",
+         REG_INFO(7));
   PrintF("\n");
   // a4-t3, s0-s7
   for (int i = 0; i < 8; i++) {
-    PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
-           REG_INFO(8+i), REG_INFO(16+i));
+    PrintF("%3s: 0x%016" PRIx64 "  %14" PRId64 " \t%3s: 0x%016" PRIx64
+           "  %14" PRId64 " \n",
+           REG_INFO(8 + i), REG_INFO(16 + i));
   }
   PrintF("\n");
   // t8, k0, LO.
-  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+  PrintF("%3s: 0x%016" PRIx64 "  %14" PRId64 " \t%3s: 0x%016" PRIx64
+         "  %14" PRId64 " \t%3s: 0x%016" PRIx64 "  %14" PRId64 " \n",
          REG_INFO(24), REG_INFO(26), REG_INFO(32));
   // t9, k1, HI.
-  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+  PrintF("%3s: 0x%016" PRIx64 "  %14" PRId64 " \t%3s: 0x%016" PRIx64
+         "  %14" PRId64 " \t%3s: 0x%016" PRIx64 "  %14" PRId64 " \n",
          REG_INFO(25), REG_INFO(27), REG_INFO(33));
   // sp, fp, gp.
-  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+  PrintF("%3s: 0x%016" PRIx64 "  %14" PRId64 " \t%3s: 0x%016" PRIx64
+         "  %14" PRId64 " \t%3s: 0x%016" PRIx64 "  %14" PRId64 " \n",
          REG_INFO(29), REG_INFO(30), REG_INFO(28));
   // pc.
-  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+  PrintF("%3s: 0x%016" PRIx64 "  %14" PRId64 " \t%3s: 0x%016" PRIx64
+         "  %14" PRId64 " \n",
          REG_INFO(31), REG_INFO(34));
 
 #undef REG_INFO
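
Editor's note: the reformatting in this hunk is not cosmetic. %lx assumes int64_t is long, which holds on LP64 hosts but not elsewhere; the <cinttypes> macros keep the format strings portable. A minimal standalone example:

// Sketch: portable printing of a 64-bit register value.
#include <cinttypes>
#include <cstdio>

void PrintRegSketch(const char* name, int64_t value) {
  std::printf("%3s: 0x%016" PRIx64 " %14" PRId64 "\n", name, value, value);
}
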
@@ -312,38 +321,38 @@
   PrintF("\n\n");
   // f0, f1, f2, ... f31.
   // TODO(plind): consider printing 2 columns for space efficiency.
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(0) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(1) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(2) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(3) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(4) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(5) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(6) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(7) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(8) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(9) );
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(10));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(11));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(12));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(13));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(14));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(15));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(16));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(17));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(18));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(19));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(20));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(21));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(22));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(23));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(24));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(25));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(26));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(27));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(28));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(29));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(30));
-  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(31));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(0));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(1));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(2));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(3));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(4));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(5));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(6));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(7));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(8));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(9));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(10));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(11));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(12));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(13));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(14));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(15));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(16));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(17));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(18));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(19));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(20));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(21));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(22));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(23));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(24));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(25));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(26));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(27));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(28));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(29));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(30));
+  PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n", FPU_REG_INFO(31));
 
 #undef REG_INFO
 #undef FPU_REG_INFO
@@ -382,7 +391,7 @@
       v8::internal::EmbeddedVector<char, 256> buffer;
       dasm.InstructionDecode(buffer,
                              reinterpret_cast<byte*>(sim_->get_pc()));
-      PrintF("  0x%016lx  %s\n", sim_->get_pc(), buffer.start());
+      PrintF("  0x%016" PRIx64 "   %s\n", sim_->get_pc(), buffer.start());
       last_pc = sim_->get_pc();
     }
     char* line = ReadLine("sim> ");
@@ -433,11 +442,12 @@
 
             if (regnum != kInvalidRegister) {
               value = GetRegisterValue(regnum);
-              PrintF("%s: 0x%08lx %ld \n", arg1, value, value);
+              PrintF("%s: 0x%08" PRIx64 "  %" PRId64 "  \n", arg1, value,
+                     value);
             } else if (fpuregnum != kInvalidFPURegister) {
               value = GetFPURegisterValue(fpuregnum);
               dvalue = GetFPURegisterValueDouble(fpuregnum);
-              PrintF("%3s: 0x%016lx %16.4e\n",
+              PrintF("%3s: 0x%016" PRIx64 "  %16.4e\n",
                      FPURegisters::Name(fpuregnum), value, dvalue);
             } else {
               PrintF("%s unrecognized\n", arg1);
@@ -454,7 +464,7 @@
                 value = GetFPURegisterValue(fpuregnum);
                 value &= 0xffffffffUL;
                 fvalue = GetFPURegisterValueFloat(fpuregnum);
-                PrintF("%s: 0x%08lx %11.4e\n", arg1, value, fvalue);
+                PrintF("%s: 0x%08" PRIx64 "  %11.4e\n", arg1, value, fvalue);
               } else {
                 PrintF("%s unrecognized\n", arg1);
               }
@@ -513,7 +523,7 @@
         end = cur + words;
 
         while (cur < end) {
-          PrintF("  0x%012lx:  0x%016lx %14ld",
+          PrintF("  0x%012" PRIxPTR " :  0x%016" PRIx64 "  %14" PRId64 " ",
                  reinterpret_cast<intptr_t>(cur), *cur, *cur);
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int64_t value = *cur;
@@ -576,8 +586,8 @@
 
         while (cur < end) {
           dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08lx  %s\n",
-              reinterpret_cast<intptr_t>(cur), buffer.start());
+          PrintF("  0x%08" PRIxPTR "   %s\n", reinterpret_cast<intptr_t>(cur),
+                 buffer.start());
           cur += Instruction::kInstrSize;
         }
       } else if (strcmp(cmd, "gdb") == 0) {
@@ -698,8 +708,8 @@
 
         while (cur < end) {
           dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08lx  %s\n",
-                 reinterpret_cast<intptr_t>(cur), buffer.start());
+          PrintF("  0x%08" PRIxPTR "   %s\n", reinterpret_cast<intptr_t>(cur),
+                 buffer.start());
           cur += Instruction::kInstrSize;
         }
       } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
@@ -1683,7 +1693,7 @@
 
 void Simulator::TraceRegWr(int64_t value) {
   if (::v8::internal::FLAG_trace_sim) {
-    SNPrintF(trace_buf_, "%016lx", value);
+    SNPrintF(trace_buf_, "%016" PRIx64 " ", value);
   }
 }
 
@@ -1691,8 +1701,9 @@
 // TODO(plind): consider making icount_ printing a flag option.
 void Simulator::TraceMemRd(int64_t addr, int64_t value) {
   if (::v8::internal::FLAG_trace_sim) {
-    SNPrintF(trace_buf_, "%016lx <-- [%016lx]    (%ld)",
-             value, addr, icount_);
+    SNPrintF(trace_buf_,
+             "%016" PRIx64 "  <-- [%016" PRIx64 " ]    (%" PRId64 " )", value,
+             addr, icount_);
   }
 }
 
@@ -1701,19 +1712,20 @@
   if (::v8::internal::FLAG_trace_sim) {
     switch (t) {
       case BYTE:
-        SNPrintF(trace_buf_, "               %02x --> [%016lx]",
+        SNPrintF(trace_buf_, "               %02x --> [%016" PRIx64 " ]",
                  static_cast<int8_t>(value), addr);
         break;
       case HALF:
-        SNPrintF(trace_buf_, "            %04x --> [%016lx]",
+        SNPrintF(trace_buf_, "            %04x --> [%016" PRIx64 " ]",
                  static_cast<int16_t>(value), addr);
         break;
       case WORD:
-        SNPrintF(trace_buf_, "        %08x --> [%016lx]",
+        SNPrintF(trace_buf_, "        %08x --> [%016" PRIx64 " ]",
                  static_cast<int32_t>(value), addr);
         break;
       case DWORD:
-        SNPrintF(trace_buf_, "%016lx --> [%016lx]    (%ld)",
+        SNPrintF(trace_buf_,
+                 "%016" PRIx64 "  --> [%016" PRIx64 " ]    (%" PRId64 " )",
                  value, addr, icount_);
         break;
     }
@@ -1726,17 +1738,17 @@
 int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
   if (addr >=0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
-    PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+           " \n",
            addr, reinterpret_cast<intptr_t>(instr));
     DieOrDebug();
   }
-  if ((addr & 0x3) == 0) {
+  if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
     int32_t* ptr = reinterpret_cast<int32_t*>(addr);
     TraceMemRd(addr, static_cast<int64_t>(*ptr));
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
+  PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
          reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
   return 0;
@@ -1746,17 +1758,17 @@
 uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
   if (addr >=0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
-    PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+           " \n",
            addr, reinterpret_cast<intptr_t>(instr));
     DieOrDebug();
   }
-  if ((addr & 0x3) == 0) {
+  if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
     uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
     TraceMemRd(addr, static_cast<int64_t>(*ptr));
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
+  PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
          reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
   return 0;
@@ -1766,18 +1778,18 @@
 void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
   if (addr >= 0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
-    PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+    PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+           " \n",
            addr, reinterpret_cast<intptr_t>(instr));
     DieOrDebug();
   }
-  if ((addr & 0x3) == 0) {
+  if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
     TraceMemWr(addr, value, WORD);
     int* ptr = reinterpret_cast<int*>(addr);
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
+  PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
          reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
 }
@@ -1786,17 +1798,17 @@
 int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
   if (addr >=0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
-    PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+    PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+           " \n",
            addr, reinterpret_cast<intptr_t>(instr));
     DieOrDebug();
   }
-  if ((addr & kPointerAlignmentMask) == 0) {
+  if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
     int64_t* ptr = reinterpret_cast<int64_t*>(addr);
     TraceMemRd(addr, *ptr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
+  PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
          reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
   return 0;
@@ -1806,102 +1818,100 @@
 void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
   if (addr >= 0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
-    PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+    PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+           "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     DieOrDebug();
   }
-  if ((addr & kPointerAlignmentMask) == 0) {
+  if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
     TraceMemWr(addr, value, DWORD);
     int64_t* ptr = reinterpret_cast<int64_t*>(addr);
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
+  PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
          reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
 }
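
Editor's note: every memory helper in this run gains the same guard, because MIPS64r6 handles unaligned accesses in hardware and can skip the alignment trap. The recurring predicate, in sketch form:

// Sketch of the guard repeated above; alignment_mask is e.g.
// kPointerAlignmentMask (7) for 64-bit accesses or 0x3 for 32-bit ones.
inline bool AccessAllowed(uint64_t addr, uint64_t alignment_mask, bool r6) {
  return (addr & alignment_mask) == 0 || r6;
}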
 
 
 double Simulator::ReadD(int64_t addr, Instruction* instr) {
-  if ((addr & kDoubleAlignmentMask) == 0) {
+  if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
     double* ptr = reinterpret_cast<double*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned (double) read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+         addr, reinterpret_cast<intptr_t>(instr));
   base::OS::Abort();
   return 0;
 }
 
 
 void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
-  if ((addr & kDoubleAlignmentMask) == 0) {
+  if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
     double* ptr = reinterpret_cast<double*>(addr);
     *ptr = value;
     return;
   }
-  PrintF("Unaligned (double) write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+         "\n",
+         addr, reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
 }
 
 
 uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     TraceMemRd(addr, static_cast<int64_t>(*ptr));
     return *ptr;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64
+         " , pc=0x%08" V8PRIxPTR "\n",
+         addr, reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
   return 0;
 }
 
 
 int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     TraceMemRd(addr, static_cast<int64_t>(*ptr));
     return *ptr;
   }
-  PrintF("Unaligned signed halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned signed halfword read at 0x%08" PRIx64
+         " , pc=0x%08" V8PRIxPTR "\n",
+         addr, reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
   return 0;
 }
 
 
 void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
     TraceMemWr(addr, value, HALF);
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     *ptr = value;
     return;
   }
-  PrintF(
-      "Unaligned unsigned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-      addr,
-      reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64
+         " , pc=0x%08" V8PRIxPTR "\n",
+         addr, reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
 }
 
 
 void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
-  if ((addr & 1) == 0) {
+  if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
     TraceMemWr(addr, value, HALF);
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     *ptr = value;
     return;
   }
-  PrintF("Unaligned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned halfword write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+         "\n",
+         addr, reinterpret_cast<intptr_t>(instr));
   DieOrDebug();
 }
 
@@ -1950,7 +1960,7 @@
 
 // Unsupported instructions use Format to print an error and stop execution.
 void Simulator::Format(Instruction* instr, const char* format) {
-  PrintF("Simulator found unsupported instruction:\n 0x%08lx: %s\n",
+  PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
          reinterpret_cast<intptr_t>(instr), format);
   UNIMPLEMENTED_MIPS();
 }
@@ -2133,8 +2143,8 @@
       }
     } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
       if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Call to host function at %p args %08lx\n",
-            reinterpret_cast<void*>(external), arg0);
+        PrintF("Call to host function at %p args %08" PRIx64 " \n",
+               reinterpret_cast<void*>(external), arg0);
       }
       SimulatorRuntimeDirectApiCall target =
           reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
@@ -2142,8 +2152,9 @@
     } else if (
         redirection->type() == ExternalReference::PROFILING_API_CALL) {
       if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Call to host function at %p args %08lx %08lx\n",
-            reinterpret_cast<void*>(external), arg0, arg1);
+        PrintF("Call to host function at %p args %08" PRIx64 "  %08" PRIx64
+               " \n",
+               reinterpret_cast<void*>(external), arg0, arg1);
       }
       SimulatorRuntimeProfilingApiCall target =
           reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
@@ -2151,8 +2162,9 @@
     } else if (
         redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
       if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Call to host function at %p args %08lx %08lx\n",
-            reinterpret_cast<void*>(external), arg0, arg1);
+        PrintF("Call to host function at %p args %08" PRIx64 "  %08" PRIx64
+               " \n",
+               reinterpret_cast<void*>(external), arg0, arg1);
       }
       SimulatorRuntimeDirectGetterCall target =
           reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
@@ -2160,8 +2172,9 @@
     } else if (
         redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
       if (::v8::internal::FLAG_trace_sim) {
-        PrintF("Call to host function at %p args %08lx %08lx %08lx\n",
-            reinterpret_cast<void*>(external), arg0, arg1, arg2);
+        PrintF("Call to host function at %p args %08" PRIx64 "  %08" PRIx64
+               "  %08" PRIx64 " \n",
+               reinterpret_cast<void*>(external), arg0, arg1, arg2);
       }
       SimulatorRuntimeProfilingGetterCall target =
           reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
@@ -2195,14 +2208,9 @@
       if (::v8::internal::FLAG_trace_sim) {
         PrintF(
             "Call to host function at %p "
-            "args %08lx, %08lx, %08lx, %08lx, %08lx, %08lx\n",
-            FUNCTION_ADDR(target),
-            arg0,
-            arg1,
-            arg2,
-            arg3,
-            arg4,
-            arg5);
+            "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+            " , %08" PRIx64 " , %08" PRIx64 " \n",
+            FUNCTION_ADDR(target), arg0, arg1, arg2, arg3, arg4, arg5);
       }
       // int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
       // set_register(v0, static_cast<int32_t>(result));
@@ -2212,7 +2220,8 @@
       set_register(v1, (int64_t)(result.y));
     }
      if (::v8::internal::FLAG_trace_sim) {
-      PrintF("Returned %08lx : %08lx\n", get_register(v1), get_register(v0));
+       PrintF("Returned %08" PRIx64 "  : %08" PRIx64 " \n", get_register(v1),
+              get_register(v0));
     }
     set_register(ra, saved_ra);
     set_pc(get_register(ra));
@@ -2241,7 +2250,8 @@
 void Simulator::PrintWatchpoint(uint64_t code) {
   MipsDebugger dbg(this);
   ++break_count_;
-  PrintF("\n---- break %ld marker: %3d  (instr count: %8ld) ----------"
+  PrintF("\n---- break %" PRId64 "  marker: %3d  (instr count: %8" PRId64
+         " ) ----------"
          "----------------------------------",
          code, break_count_, icount_);
   dbg.PrintAllRegs();  // Print registers and continue running.
@@ -2291,8 +2301,10 @@
 void Simulator::IncreaseStopCounter(uint64_t code) {
   DCHECK(code <= kMaxStopCode);
   if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
-    PrintF("Stop counter for code %ld has overflowed.\n"
-           "Enabling this code and reseting the counter to 0.\n", code);
+    PrintF("Stop counter for code %" PRId64
+           "  has overflowed.\n"
+           "Enabling this code and resetting the counter to 0.\n",
+           code);
     watched_stops_[code].count = 0;
     EnableStop(code);
   } else {
@@ -2315,11 +2327,11 @@
   // Don't print the state of unused breakpoints.
   if (count != 0) {
     if (watched_stops_[code].desc) {
-      PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i, \t%s\n",
+      PrintF("stop %" PRId64 "  - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n",
              code, code, state, count, watched_stops_[code].desc);
     } else {
-      PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i\n",
-             code, code, state, count);
+      PrintF("stop %" PRId64 "  - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code,
+             code, state, count);
     }
   }
 }
@@ -3420,21 +3432,50 @@
         // bits instruction. RS field is always equal to 0.
         // Sign-extend the 32-bit result.
         alu_out = static_cast<int32_t>(static_cast<uint32_t>(rt_u()) >> sa());
-      } else {
+      } else if (rs_reg() == 1) {
         // Logical right-rotate of a word by a fixed number of bits. This
         // is a special case of the SRL instruction (MIPS32 Release 2).
         // RS field is equal to 00001.
         alu_out = static_cast<int32_t>(
             base::bits::RotateRight32(static_cast<const uint32_t>(rt_u()),
                                       static_cast<const uint32_t>(sa())));
+      } else {
+        UNREACHABLE();
       }
       SetResult(rd_reg(), alu_out);
       break;
     case DSRL:
-      SetResult(rd_reg(), rt_u() >> sa());
+      if (rs_reg() == 0) {
+        // Regular logical right shift of a doubleword by a fixed number of
+        // bits instruction. RS field is always equal to 0.
+        alu_out = static_cast<int64_t>(rt_u() >> sa());
+      } else if (rs_reg() == 1) {
+        // Logical right-rotate of a doubleword by a fixed number of bits.
+        // This is a special case of the DSRL instruction (DROTR), added in
+        // MIPS64 Release 2. RS field is equal to 00001.
+        alu_out = static_cast<int64_t>(base::bits::RotateRight64(rt_u(), sa()));
+      } else {
+        UNREACHABLE();
+      }
+      SetResult(rd_reg(), alu_out);
       break;
     case DSRL32:
-      SetResult(rd_reg(), rt_u() >> sa() >> 32);
+      if (rs_reg() == 0) {
+        // Regular logical right shift of a doubleword by a fixed number of
+        // bits (plus 32) instruction. RS field is always equal to 0.
+        alu_out = static_cast<int64_t>(rt_u() >> sa() >> 32);
+      } else if (rs_reg() == 1) {
+        // Logical right-rotate of a doubleword by a fixed number of bits
+        // (plus 32). This is a special case of the DSRL32 instruction
+        // (DROTR32), added in MIPS64 Release 2. RS field is equal to 00001.
+        alu_out =
+            static_cast<int64_t>(base::bits::RotateRight64(rt_u(), sa() + 32));
+      } else {
+        UNREACHABLE();
+      }
+      SetResult(rd_reg(), alu_out);
       break;
     case SRA:
       SetResult(rd_reg(), (int32_t)rt() >> sa());
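
Editor's note: the DSRL/DSRL32 cases above now decode the rotate variants (rs field == 1). For reference, a hedged sketch of the primitive they call; base::bits::RotateRight64 behaves like this:

// Sketch of a 64-bit rotate right; the shift is masked so 0 and 64 avoid
// undefined behavior in the complementary shift.
#include <cstdint>

inline uint64_t RotateRight64Sketch(uint64_t v, unsigned shift) {
  shift &= 63;
  return shift == 0 ? v : (v >> shift) | (v << (64 - shift));
}
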
@@ -3470,12 +3511,13 @@
       if (sa() == 0) {
         // Regular logical right-shift of a word by a variable number of
         // bits instruction. SA field is always equal to 0.
-        alu_out = rt_u() >> rs();
+        alu_out = static_cast<int64_t>(rt_u() >> rs());
       } else {
         // Logical right-rotate of a word by a variable number of bits.
         // This is a special case of the SRLV instruction, added in MIPS32
         // Release 2. SA field is equal to 00001.
-        alu_out = base::bits::RotateRight64(rt_u(), rs_u());
+        alu_out =
+            static_cast<int64_t>(base::bits::RotateRight64(rt_u(), rs_u()));
       }
       SetResult(rd_reg(), alu_out);
       break;
@@ -3777,6 +3819,9 @@
     case TNE:
       do_interrupt = rs() != rt();
       break;
+    case SYNC:
+      // TODO(palfia): Ignore sync instruction for now.
+      break;
     // Conditional moves.
     case MOVN:
       if (rt()) {
@@ -4119,6 +4164,8 @@
   int64_t addr = 0x0;
   // Alignment for 32-bit integers used in LWL, LWR, etc.
   const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
+  // Alignment for 64-bit integers used in LDL, LDR, etc.
+  const int kInt64AlignmentMask = sizeof(uint64_t) - 1;
 
   // Branch instructions common part.
   auto BranchAndLinkHelper = [this, instr, &next_pc,
@@ -4366,6 +4413,7 @@
       break;
     // ------------- Arithmetic instructions.
     case ADDIU: {
+      DCHECK(is_int32(rs));
       int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
       // Sign-extend result of 32bit operation into 64bit register.
       SetResult(rt_reg, static_cast<int64_t>(alu32_out));
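
Editor's note: the DCHECK added above pins down the ADDIU contract: the operand must already be a valid 32-bit value, because the sum is computed in 32 bits and sign-extended into the 64-bit register. In sketch form:

// Sketch of the ADDIU semantics the simulator implements above.
#include <cstdint>

inline int64_t AddiuSketch(int64_t rs, int16_t se_imm16) {
  int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
  return static_cast<int64_t>(alu32_out);  // sign-extend into 64 bits
}
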
@@ -4420,10 +4468,10 @@
       uint8_t byte_shift = kInt32AlignmentMask - al_offset;
       uint32_t mask = (1 << byte_shift * 8) - 1;
       addr = rs + se_imm16 - al_offset;
-      alu_out = ReadW(addr, instr);
-      alu_out <<= byte_shift * 8;
-      alu_out |= rt & mask;
-      set_register(rt_reg, alu_out);
+      int32_t val = ReadW(addr, instr);
+      val <<= byte_shift * 8;
+      val |= rt & mask;
+      set_register(rt_reg, static_cast<int64_t>(val));
       break;
     }
     case LW:
@@ -4453,6 +4501,30 @@
       set_register(rt_reg, alu_out);
       break;
     }
+    case LDL: {
+      // al_offset is the offset of the effective address within an aligned
+      // doubleword.
+      uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+      uint8_t byte_shift = kInt64AlignmentMask - al_offset;
+      uint64_t mask = (1UL << byte_shift * 8) - 1;
+      addr = rs + se_imm16 - al_offset;
+      alu_out = Read2W(addr, instr);
+      alu_out <<= byte_shift * 8;
+      alu_out |= rt & mask;
+      set_register(rt_reg, alu_out);
+      break;
+    }
+    case LDR: {
+      // al_offset is the offset of the effective address within an aligned
+      // doubleword.
+      uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+      uint8_t byte_shift = kInt64AlignmentMask - al_offset;
+      uint64_t mask = al_offset ? (~0UL << (byte_shift + 1) * 8) : 0UL;
+      addr = rs + se_imm16 - al_offset;
+      alu_out = Read2W(addr, instr);
+      alu_out = alu_out >> al_offset * 8;
+      alu_out |= rt & mask;
+      set_register(rt_reg, alu_out);
+      break;
+    }
     case SB:
       WriteB(rs + se_imm16, static_cast<int8_t>(rt));
       break;
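
Editor's note: the LDL/LDR cases added above each implement half of an unaligned 64-bit load: LDL supplies the high bytes and keeps the low bytes of rt under a mask, LDR the reverse. A hedged sketch of the LDL merge step, mirroring the arithmetic in that case:

// Sketch: al_offset is the byte offset of the effective address within its
// aligned doubleword (0..7); byte_shift*8 never exceeds 56, so no UB.
#include <cstdint>

inline uint64_t LdlMergeSketch(uint64_t aligned_mem, uint64_t rt,
                               unsigned al_offset) {
  unsigned byte_shift = 7 - al_offset;   // kInt64AlignmentMask - al_offset
  uint64_t mask = (1ULL << (byte_shift * 8)) - 1;
  return (aligned_mem << (byte_shift * 8)) | (rt & mask);
}
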
@@ -4484,6 +4556,25 @@
       WriteW(addr, static_cast<int32_t>(mem_value), instr);
       break;
     }
+    case SDL: {
+      uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+      uint8_t byte_shift = kInt64AlignmentMask - al_offset;
+      uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
+      addr = rs + se_imm16 - al_offset;
+      uint64_t mem_value = Read2W(addr, instr) & mask;
+      mem_value |= rt >> byte_shift * 8;
+      Write2W(addr, mem_value, instr);
+      break;
+    }
+    case SDR: {
+      uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+      uint64_t mask = (1UL << al_offset * 8) - 1;
+      addr = rs + se_imm16 - al_offset;
+      uint64_t mem_value = Read2W(addr, instr);
+      mem_value = (rt << al_offset * 8) | (mem_value & mask);
+      Write2W(addr, mem_value, instr);
+      break;
+    }
     case LWC1:
       set_fpu_register(ft_reg, kFPUInvalidResult);  // Trash upper 32 bits.
       set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
@@ -4642,8 +4733,9 @@
   }
 
   if (::v8::internal::FLAG_trace_sim) {
-    PrintF("  0x%08lx  %-44s   %s\n", reinterpret_cast<intptr_t>(instr),
-        buffer.start(), trace_buf_.start());
+    PrintF("  0x%08" PRIxPTR "   %-44s   %s\n",
+           reinterpret_cast<intptr_t>(instr), buffer.start(),
+           trace_buf_.start());
   }
 
   if (!pc_modified_) {
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index bfc1895..ee2e01e 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -472,6 +472,7 @@
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
+    case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index b023b03..a8728fc 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -99,6 +99,7 @@
       Oddball::cast(this)->OddballVerify();
       break;
     case JS_OBJECT_TYPE:
+    case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_PROMISE_TYPE:
@@ -209,7 +210,7 @@
 void Symbol::SymbolVerify() {
   CHECK(IsSymbol());
   CHECK(HasHashCode());
-  CHECK(GetHeap()->hidden_properties_symbol() == this || Hash() > 0u);
+  CHECK(Hash() > 0u);
   CHECK(name()->IsUndefined() || name()->IsString());
 }
 
@@ -369,21 +370,6 @@
 }
 
 
-void CodeCache::CodeCacheVerify() {
-  VerifyHeapPointer(default_cache());
-  VerifyHeapPointer(normal_type_cache());
-  CHECK(default_cache()->IsFixedArray());
-  CHECK(normal_type_cache()->IsUndefined()
-         || normal_type_cache()->IsCodeCacheHashTable());
-}
-
-
-void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
-  VerifyHeapPointer(cache());
-  CHECK(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
-}
-
-
 void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
   VerifyObjectField(kStorage1Offset);
   VerifyObjectField(kStorage2Offset);
@@ -623,7 +609,7 @@
     CHECK(number->IsSmi());
     int value = Smi::cast(number)->value();
     // Hidden oddballs have negative smis.
-    const int kLeastHiddenOddballNumber = -6;
+    const int kLeastHiddenOddballNumber = -7;
     CHECK_LE(value, 1);
     CHECK(value >= kLeastHiddenOddballNumber);
   }
@@ -648,6 +634,8 @@
     CHECK(this == heap->exception());
   } else if (map() == heap->optimized_out_map()) {
     CHECK(this == heap->optimized_out());
+  } else if (map() == heap->stale_register_map()) {
+    CHECK(this == heap->stale_register());
   } else {
     UNREACHABLE();
   }
@@ -921,6 +909,7 @@
   VerifyPointer(expected_receiver_type());
   VerifyPointer(getter());
   VerifyPointer(setter());
+  VerifyPointer(js_getter());
   VerifyPointer(data());
 }
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index f4d7fb9..a64d9ff 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -18,6 +18,7 @@
 #include "src/conversions-inl.h"
 #include "src/factory.h"
 #include "src/field-index-inl.h"
+#include "src/field-type.h"
 #include "src/handles-inl.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap.h"
@@ -874,10 +875,6 @@
 
 bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
 
-bool HeapObject::IsPolymorphicCodeCacheHashTable() const {
-  return IsHashTable();
-}
-
 bool HeapObject::IsMapCache() const { return IsHashTable(); }
 
 bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
@@ -1150,6 +1147,12 @@
       object, HeapObject::RawField(object, offset), value); \
   heap->RecordWrite(object, offset, value);
 
+#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
+  do {                                                                 \
+    heap->RecordFixedArrayElements(array, start, length);              \
+    heap->incremental_marking()->IterateBlackObject(array);            \
+  } while (false)
+
 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
   if (mode != SKIP_WRITE_BARRIER) {                                  \
     if (mode == UPDATE_WRITE_BARRIER) {                              \
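
Editor's note: the new FIXED_ARRAY_ELEMENTS_WRITE_BARRIER macro wraps its two statements in do { } while (false). The idiom deserves a sketch: it makes a multi-statement macro behave as a single statement, so it composes with un-braced if/else:

// Sketch of the idiom; without the do/while, the else below would bind to
// the macro's internal statements instead of the outer if.
#define TWO_CALLS_SKETCH(a, b) \
  do {                         \
    (a);                       \
    (b);                       \
  } while (false)

// if (cond) TWO_CALLS_SKETCH(f(), g()); else h();   // parses as intended
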
@@ -1260,8 +1263,7 @@
   return reinterpret_cast<Map*>(value_);
 }
 
-
-bool MapWord::IsForwardingAddress() {
+bool MapWord::IsForwardingAddress() const {
   return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
 }
 
@@ -1784,7 +1786,7 @@
                                         Object** objects,
                                         uint32_t count,
                                         EnsureElementsMode mode) {
-  ElementsKind current_kind = object->map()->elements_kind();
+  ElementsKind current_kind = object->GetElementsKind();
   ElementsKind target_kind = current_kind;
   {
     DisallowHeapAllocation no_allocation;
@@ -1908,6 +1910,13 @@
       constructor->shared()->get_api_func_data()->indexed_property_handler());
 }
 
+double Oddball::to_number_raw() const {
+  return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
+}
+
+void Oddball::set_to_number_raw(double value) {
+  WRITE_DOUBLE_FIELD(this, kToNumberRawOffset, value);
+}
 
 ACCESSORS(Oddball, to_string, String, kToStringOffset)
 ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@@ -2004,6 +2013,7 @@
   // field operations considerably on average.
   if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
   switch (type) {
+    case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
       return JSObject::kHeaderSize;
     case JS_GENERATOR_OBJECT_TYPE:
@@ -2793,6 +2803,25 @@
   return NULL;
 }
 
+// static
+Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
+                                     PropertyKind new_kind,
+                                     PropertyAttributes new_attributes,
+                                     Representation new_representation,
+                                     Handle<FieldType> new_field_type,
+                                     StoreMode store_mode) {
+  return Reconfigure(map, map->elements_kind(), modify_index, new_kind,
+                     new_attributes, new_representation, new_field_type,
+                     store_mode);
+}
+
+// static
+Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
+                                         ElementsKind new_elements_kind) {
+  return Reconfigure(map, new_elements_kind, -1, kData, NONE,
+                     Representation::None(), FieldType::None(map->GetIsolate()),
+                     ALLOW_IN_DESCRIPTOR);
+}
 
 Object** DescriptorArray::GetKeySlot(int descriptor_number) {
   DCHECK(descriptor_number < number_of_descriptors());
@@ -3037,13 +3066,14 @@
   uint32_t entry = FirstProbe(hash, capacity);
   uint32_t count = 1;
   // EnsureCapacity will guarantee the hash table is never full.
+  Object* undefined = isolate->heap()->undefined_value();
+  Object* the_hole = isolate->heap()->the_hole_value();
   while (true) {
     Object* element = KeyAt(entry);
     // Empty entry. Uses raw unchecked accessors because it is called by the
     // string table during bootstrapping.
-    if (element == isolate->heap()->root(Heap::kUndefinedValueRootIndex)) break;
-    if (element != isolate->heap()->root(Heap::kTheHoleValueRootIndex) &&
-        Shape::IsMatch(key, element)) return entry;
+    if (element == undefined) break;
+    if (element != the_hole && Shape::IsMatch(key, element)) return entry;
     entry = NextProbe(entry, count++, capacity);
   }
   return kNotFound;
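
Editor's note: the FindEntry change above hoists the undefined and the_hole sentinels out of the probe loop, saving two root-table loads per probe. The loop's shape, as a hedged standalone sketch with illustrative types:

// Sketch of the probe loop above; capacity is a power of two and the table
// is never full (EnsureCapacity), so the loop always terminates.
#include <cstdint>

inline int FindEntrySketch(void** keys, uint32_t capacity, void* key,
                           uint32_t hash, void* undefined, void* the_hole) {
  uint32_t entry = hash & (capacity - 1);                     // FirstProbe
  for (uint32_t count = 1;; entry = (entry + count++) & (capacity - 1)) {
    void* element = keys[entry];
    if (element == undefined) return -1;                      // kNotFound
    if (element != the_hole && element == key) return static_cast<int>(entry);
  }
}
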
@@ -3149,7 +3179,6 @@
 CAST_ACCESSOR(Oddball)
 CAST_ACCESSOR(OrderedHashMap)
 CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
 CAST_ACCESSOR(PropertyCell)
 CAST_ACCESSOR(ScopeInfo)
 CAST_ACCESSOR(SeededNumberDictionary)
@@ -3896,7 +3925,6 @@
 
 int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
 
-
 byte ByteArray::get(int index) {
   DCHECK(index >= 0 && index < this->length());
   return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -3908,12 +3936,29 @@
   WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
 }
 
+void ByteArray::copy_in(int index, const byte* buffer, int length) {
+  DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+         index + length <= this->length());
+  byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+  memcpy(dst_addr, buffer, length);
+}
+
+void ByteArray::copy_out(int index, byte* buffer, int length) {
+  DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+         index + length <= this->length());
+  const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+  memcpy(buffer, src_addr, length);
+}
 
 int ByteArray::get_int(int index) {
-  DCHECK(index >= 0 && (index * kIntSize) < this->length());
+  DCHECK(index >= 0 && index < this->length() / kIntSize);
   return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
 }
 
+void ByteArray::set_int(int index, int value) {
+  DCHECK(index >= 0 && index < this->length() / kIntSize);
+  WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
+}
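
Editor's note: the copy_in/copy_out DCHECKs above use `index + length >= index` as a wraparound guard alongside the bounds test. As a sketch (signed overflow is strictly undefined behavior; this is a debug-only heuristic, as in the original):

// Sketch of the range check above: rejects negative inputs, wrapped sums,
// and out-of-bounds ranges in a single expression.
inline bool RangeOkSketch(int index, int length, int size) {
  return index >= 0 && length >= 0 && index + length >= index &&
         index + length <= size;
}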
 
 ByteArray* ByteArray::FromDataStartAddress(Address address) {
   DCHECK_TAG_ALIGNED(address);
@@ -4441,11 +4486,6 @@
 }
 
 
-void Map::set_is_observed() { set_bit_field(bit_field() | (1 << kIsObserved)); }
-
-bool Map::is_observed() { return ((1 << kIsObserved) & bit_field()) != 0; }
-
-
 void Map::set_has_named_interceptor() {
   set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
 }
@@ -4624,7 +4664,9 @@
 
 
 bool Map::has_code_cache() {
-  return code_cache() != GetIsolate()->heap()->empty_fixed_array();
+  // Code caches are always fixed arrays. The empty fixed array is used as a
+  // sentinel for an absent code cache.
+  return FixedArray::cast(code_cache())->length() != 0;
 }
 
 
@@ -4789,10 +4831,6 @@
 }
 
 
-Code::StubType Code::type() {
-  return ExtractTypeFromFlags(flags());
-}
-
 // For initialization.
 void Code::set_raw_kind_specific_flags1(int value) {
   WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -5031,18 +5069,8 @@
   }
 }
 
-
-bool Code::is_keyed_stub() {
-  return is_keyed_load_stub() || is_keyed_store_stub();
-}
-
-
 bool Code::is_debug_stub() { return ic_state() == DEBUG_STUB; }
 bool Code::is_handler() { return kind() == HANDLER; }
-bool Code::is_load_stub() { return kind() == LOAD_IC; }
-bool Code::is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
-bool Code::is_store_stub() { return kind() == STORE_IC; }
-bool Code::is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
 bool Code::is_call_stub() { return kind() == CALL_IC; }
 bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
 bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
@@ -5070,11 +5098,10 @@
 }
 
 Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state,
-                               ExtraICState extra_ic_state, StubType type,
+                               ExtraICState extra_ic_state,
                                CacheHolderFlag holder) {
   // Compute the bit mask.
   unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) |
-                      TypeField::encode(type) |
                       ExtraICStateField::encode(extra_ic_state) |
                       CacheHolderField::encode(holder);
   return static_cast<Flags>(bits);
@@ -5082,15 +5109,13 @@
 
 Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
                                           ExtraICState extra_ic_state,
-                                          CacheHolderFlag holder,
-                                          StubType type) {
-  return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
+                                          CacheHolderFlag holder) {
+  return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, holder);
 }
 
-
-Code::Flags Code::ComputeHandlerFlags(Kind handler_kind, StubType type,
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
                                       CacheHolderFlag holder) {
-  return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
+  return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, holder);
 }
 
 
@@ -5109,23 +5134,12 @@
 }
 
 
-Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
-  return TypeField::decode(flags);
-}
-
 CacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
   return CacheHolderField::decode(flags);
 }
 
-
-Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
-  int bits = flags & ~TypeField::kMask;
-  return static_cast<Flags>(bits);
-}
-
-
-Code::Flags Code::RemoveTypeAndHolderFromFlags(Flags flags) {
-  int bits = flags & ~TypeField::kMask & ~CacheHolderField::kMask;
+Code::Flags Code::RemoveHolderFromFlags(Flags flags) {
+  int bits = flags & ~CacheHolderField::kMask;
   return static_cast<Flags>(bits);
 }
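
Editor's note: with StubType removed, Code::Flags packs three fields instead of four. KindField, ICStateField and friends are instances of V8's BitField template; a hedged re-derivation of the pattern (shift/size values here are illustrative):

// Sketch of the BitField pattern behind the encode/decode calls above.
template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr unsigned kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr unsigned encode(T value) {
    return static_cast<unsigned>(value) << kShift;
  }
  static constexpr T decode(unsigned flags) {
    return static_cast<T>((flags & kMask) >> kShift);
  }
};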
 
@@ -5425,8 +5439,6 @@
 }
 
 
-ACCESSORS(JSBoundFunction, length, Object, kLengthOffset)
-ACCESSORS(JSBoundFunction, name, Object, kNameOffset)
 ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
           kBoundTargetFunctionOffset)
 ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
@@ -5449,6 +5461,7 @@
 
 ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
 ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
 ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 
 ACCESSORS(Box, value, Object, kValueOffset)
@@ -5534,8 +5547,7 @@
 SMI_ACCESSORS(Script, type, kTypeOffset)
 ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
 ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-SMI_ACCESSORS(Script, eval_from_instructions_offset,
-              kEvalFrominstructionsOffsetOffset)
+SMI_ACCESSORS(Script, eval_from_position, kEvalFromPositionOffset)
 ACCESSORS(Script, shared_function_infos, Object, kSharedFunctionInfosOffset)
 SMI_ACCESSORS(Script, flags, kFlagsOffset)
 ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
@@ -5778,7 +5790,6 @@
   set_compiler_hints(hints);
 }
 
-
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
                kNeedsHomeObject)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
@@ -5794,6 +5805,7 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_async, kIsAsyncFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
                kIsConciseMethod)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_getter_function,
@@ -5803,10 +5815,9 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
                kIsDefaultConstructor)
 
-ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
-ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
-
-ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
+inline bool SharedFunctionInfo::is_resumable() const {
+  return is_generator() || is_async();
+}
 
 bool Script::HasValidSource() {
   Object* src = this->source();
@@ -5890,6 +5901,7 @@
   Builtins* builtins = GetIsolate()->builtins();
   DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
   DCHECK(code() != builtins->builtin(Builtins::kCompileOptimized));
+  DCHECK(code() != builtins->builtin(Builtins::kCompileBaseline));
   return code() != builtins->builtin(Builtins::kCompileLazy);
 }
 
@@ -5937,7 +5949,6 @@
   return function_data()->IsBytecodeArray();
 }
 
-
 BytecodeArray* SharedFunctionInfo::bytecode_array() {
   DCHECK(HasBytecodeArray());
   return BytecodeArray::cast(function_data());
@@ -6085,6 +6096,10 @@
   return code()->kind() == Code::OPTIMIZED_FUNCTION;
 }
 
+bool JSFunction::IsMarkedForBaseline() {
+  return code() ==
+         GetIsolate()->builtins()->builtin(Builtins::kCompileBaseline);
+}
 
 bool JSFunction::IsMarkedForOptimization() {
   return code() == GetIsolate()->builtins()->builtin(
@@ -6250,6 +6265,7 @@
 bool JSFunction::is_compiled() {
   Builtins* builtins = GetIsolate()->builtins();
   return code() != builtins->builtin(Builtins::kCompileLazy) &&
+         code() != builtins->builtin(Builtins::kCompileBaseline) &&
          code() != builtins->builtin(Builtins::kCompileOptimized) &&
          code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
 }
@@ -6306,13 +6322,14 @@
 ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
 ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
 ACCESSORS(JSGeneratorObject, input, Object, kInputOffset)
+SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
 SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
 ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
 
 bool JSGeneratorObject::is_suspended() {
-  DCHECK_LT(kGeneratorExecuting, kGeneratorClosed);
-  DCHECK_EQ(kGeneratorClosed, 0);
-  return continuation() > 0;
+  DCHECK_LT(kGeneratorExecuting, 0);
+  DCHECK_LT(kGeneratorClosed, 0);
+  return continuation() >= 0;
 }
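
Editor's note: the rewritten is_suspended reflects a new continuation encoding; both sentinels are now negative, and any non-negative continuation marks a suspend point. Sketch (the sentinel values are illustrative of the invariant, not authoritative):

// Sketch: executing/closed are negative sentinels; >= 0 means suspended.
enum ContinuationSketch : int { kExecutingSketch = -2, kClosedSketch = -1 };
inline bool IsSuspendedSketch(int continuation) { return continuation >= 0; }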
 
 bool JSGeneratorObject::is_closed() {
@@ -7120,7 +7137,7 @@
                                        Handle<Name> name) {
   if (object->IsJSObject()) {  // Shortcut
     LookupIterator it = LookupIterator::PropertyOrElement(
-        object->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
+        object->GetIsolate(), object, name, object, LookupIterator::OWN);
     return HasProperty(&it);
   }
 
@@ -7130,6 +7147,19 @@
   return Just(attributes.FromJust() != ABSENT);
 }
 
+Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+                                       uint32_t index) {
+  if (object->IsJSObject()) {  // Shortcut
+    LookupIterator it(object->GetIsolate(), object, index, object,
+                      LookupIterator::OWN);
+    return HasProperty(&it);
+  }
+
+  Maybe<PropertyAttributes> attributes =
+      JSReceiver::GetOwnPropertyAttributes(object, index);
+  MAYBE_RETURN(attributes, Nothing<bool>());
+  return Just(attributes.FromJust() != ABSENT);
+}
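
Both HasOwnProperty overloads lean on the same Maybe contract: Nothing means an exception is already pending and must be propagated, Just carries the boolean answer. A minimal sketch of that contract follows; it is not V8's actual Maybe (which lives in include/v8.h), just its shape:

    #include <cassert>

    template <typename T>
    class Maybe {
     public:
      static Maybe Nothing() { return Maybe(); }
      static Maybe Just(T value) { return Maybe(value); }
      bool IsNothing() const { return !has_value_; }
      bool IsJust() const { return has_value_; }
      T FromJust() const {  // only legal after checking IsJust()
        assert(has_value_);
        return value_;
      }

     private:
      Maybe() : has_value_(false), value_() {}
      explicit Maybe(T value) : has_value_(true), value_(value) {}
      bool has_value_;
      T value_;
    };
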
 
 Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
     Handle<JSReceiver> object, Handle<Name> name) {
@@ -7142,10 +7172,16 @@
 Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
     Handle<JSReceiver> object, Handle<Name> name) {
   LookupIterator it = LookupIterator::PropertyOrElement(
-      name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
+      name->GetIsolate(), object, name, object, LookupIterator::OWN);
   return GetPropertyAttributes(&it);
 }
 
+Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
+    Handle<JSReceiver> object, uint32_t index) {
+  LookupIterator it(object->GetIsolate(), object, index, object,
+                    LookupIterator::OWN);
+  return GetPropertyAttributes(&it);
+}
 
 Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
   LookupIterator it(object->GetIsolate(), object, index, object);
@@ -7164,7 +7200,7 @@
 Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
     Handle<JSReceiver> object, uint32_t index) {
   Isolate* isolate = object->GetIsolate();
-  LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
+  LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
   return GetPropertyAttributes(&it);
 }
 
@@ -7535,7 +7571,6 @@
   // Please note this function is used during marking:
   //  - MarkCompactCollector::MarkUnmarkedObject
   //  - IncrementalMarking::Step
-  DCHECK(!heap->InNewSpace(heap->empty_fixed_array()));
   WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
 }
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 58092a4..551beb2 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -33,7 +33,13 @@
 
 
 void HeapObject::PrintHeader(std::ostream& os, const char* id) {  // NOLINT
-  os << reinterpret_cast<void*>(this) << ": [" << id << "]";
+  os << reinterpret_cast<void*>(this) << ": [";
+  if (id != nullptr) {
+    os << id;
+  } else {
+    os << map()->instance_type();
+  }
+  os << "]";
 }
 
 
@@ -95,13 +101,16 @@
       os << "filler";
       break;
     case JS_OBJECT_TYPE:  // fall through
+    case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
-    case JS_ARRAY_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_PROMISE_TYPE:
       JSObject::cast(this)->JSObjectPrint(os);
       break;
+    case JS_ARRAY_TYPE:
+      JSArray::cast(this)->JSArrayPrint(os);
+      break;
     case JS_REGEXP_TYPE:
       JSRegExp::cast(this)->JSRegExpPrint(os);
       break;
@@ -396,16 +405,20 @@
   obj->PrintHeader(os, id);
   // Don't call GetElementsKind, its validation code can cause the printer to
   // fail when debugging.
-  os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " ["
+  os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " [";
+  if (obj->HasFastProperties()) {
+    os << "FastProperties";
+  } else {
+    os << "DictionaryProperties";
+  }
+  PrototypeIterator iter(obj->GetIsolate(), obj);
+  os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+  os << "\n - elements = " << Brief(obj->elements()) << " ["
      << ElementsKindToString(obj->map()->elements_kind());
   if (obj->elements()->map() == obj->GetHeap()->fixed_cow_array_map()) {
     os << " (COW)";
   }
-  PrototypeIterator iter(obj->GetIsolate(), obj);
-  os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
-  if (obj->elements()->length() > 0) {
-    os << "\n - elements = " << Brief(obj->elements());
-  }
+  os << "]";
 }
 
 
@@ -413,14 +426,19 @@
                               bool print_elements = true) {
   os << "\n {";
   obj->PrintProperties(os);
-  obj->PrintTransitions(os);
   if (print_elements) obj->PrintElements(os);
   os << "\n }\n";
 }
 
 
 void JSObject::JSObjectPrint(std::ostream& os) {  // NOLINT
-  JSObjectPrintHeader(os, this, "JSObject");
+  JSObjectPrintHeader(os, this, nullptr);
+  JSObjectPrintBody(os, this);
+}
+
+void JSArray::JSArrayPrint(std::ostream& os) {  // NOLINT
+  JSObjectPrintHeader(os, this, "JSArray");
+  os << "\n - length = " << Brief(this->length());
   JSObjectPrintBody(os, this);
 }
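
Taken together, the reworked JSObjectPrintHeader and the new JSArrayPrint produce headers along these lines; the object, addresses, and element count here are invented for illustration:

    0x3f5c8e10: [JSArray]
     - map = 0x3f5c1234 [FastProperties]
     - prototype = 0x3f5c5678
     - elements = 0x3f5c9abc <FixedArray[3]> [FAST_SMI_ELEMENTS]
     - length = 3
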
 
@@ -478,7 +496,6 @@
   if (is_constructor()) os << "\n - constructor";
   if (is_access_check_needed()) os << "\n - access_check_needed";
   if (!is_extensible()) os << "\n - non-extensible";
-  if (is_observed()) os << "\n - observed";
   if (is_prototype_map()) {
     os << "\n - prototype_map";
     os << "\n - prototype info: " << Brief(prototype_info());
@@ -506,20 +523,6 @@
 }
 
 
-void CodeCache::CodeCachePrint(std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "CodeCache");
-  os << "\n - default_cache: " << Brief(default_cache());
-  os << "\n - normal_type_cache: " << Brief(normal_type_cache());
-}
-
-
-void PolymorphicCodeCache::PolymorphicCodeCachePrint(
-    std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "PolymorphicCodeCache");
-  os << "\n - cache: " << Brief(cache());
-}
-
-
 void TypeFeedbackInfo::TypeFeedbackInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "TypeFeedbackInfo");
   os << "\n - ic_total_count: " << ic_total_count()
@@ -988,6 +991,7 @@
   os << "\n - flag: " << flag();
   os << "\n - getter: " << Brief(getter());
   os << "\n - setter: " << Brief(setter());
+  os << "\n - js_getter: " << Brief(js_getter());
   os << "\n - data: " << Brief(data());
   os << "\n";
 }
@@ -1140,8 +1144,7 @@
   os << "\n - compilation type: " << compilation_type();
   os << "\n - line ends: " << Brief(line_ends());
   os << "\n - eval from shared: " << Brief(eval_from_shared());
-  os << "\n - eval from instructions offset: "
-     << eval_from_instructions_offset();
+  os << "\n - eval from position: " << eval_from_position();
   os << "\n - shared function infos: " << Brief(shared_function_infos());
   os << "\n";
 }
@@ -1293,7 +1296,7 @@
   for (int i = 0; i < num_transitions; i++) {
     Name* key = GetKey(transitions, i);
     Map* target = GetTarget(transitions, i);
-    os << "\n   ";
+    os << "\n     ";
 #ifdef OBJECT_PRINT
     key->NamePrint(os);
 #else
@@ -1312,8 +1315,6 @@
          << ")";
     } else if (key == heap->strict_function_transition_symbol()) {
       os << " (transition to strict function)";
-    } else if (key == heap->observed_symbol()) {
-      os << " (transition to Object.observe)";
     } else {
       PropertyDetails details = GetTargetDetails(key, target);
       os << "(transition to ";
diff --git a/src/objects.cc b/src/objects.cc
index 1a82c3c..addf97a 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -12,9 +12,9 @@
 
 #include "src/accessors.h"
 #include "src/allocation-site-scopes.h"
-#include "src/api.h"
 #include "src/api-arguments.h"
 #include "src/api-natives.h"
+#include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
@@ -22,6 +22,8 @@
 #include "src/codegen.h"
 #include "src/compilation-dependencies.h"
 #include "src/compiler.h"
+#include "src/counters-inl.h"
+#include "src/counters.h"
 #include "src/date.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
@@ -30,6 +32,7 @@
 #include "src/field-index-inl.h"
 #include "src/field-index.h"
 #include "src/field-type.h"
+#include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 #include "src/identity-map.h"
@@ -122,7 +125,7 @@
   if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
   if (*object == isolate->heap()->null_value() ||
       *object == isolate->heap()->undefined_value()) {
-    return handle(isolate->global_proxy(), isolate);
+    return isolate->global_proxy();
   }
   return Object::ToObject(isolate, object);
 }
@@ -568,6 +571,88 @@
                                               NumberToInt32(*rhs));
 }
 
+// static
+MaybeHandle<Object> Object::OrdinaryHasInstance(Isolate* isolate,
+                                                Handle<Object> callable,
+                                                Handle<Object> object) {
+  // The {callable} must have a [[Call]] internal method.
+  if (!callable->IsCallable()) return isolate->factory()->false_value();
+
+  // Check if {callable} is a bound function, and if so retrieve its
+  // [[BoundTargetFunction]] and use that instead of {callable}.
+  if (callable->IsJSBoundFunction()) {
+    Handle<Object> bound_callable(
+        Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+        isolate);
+    return Object::InstanceOf(isolate, object, bound_callable);
+  }
+
+  // If {object} is not a receiver, return false.
+  if (!object->IsJSReceiver()) return isolate->factory()->false_value();
+
+  // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+  Handle<Object> prototype;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, prototype,
+      Object::GetProperty(callable, isolate->factory()->prototype_string()),
+      Object);
+  if (!prototype->IsJSReceiver()) {
+    THROW_NEW_ERROR(
+        isolate,
+        NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype),
+        Object);
+  }
+
+  // Return whether or not {prototype} is in the prototype chain of {object}.
+  Maybe<bool> result = JSReceiver::HasInPrototypeChain(
+      isolate, Handle<JSReceiver>::cast(object), prototype);
+  if (result.IsNothing()) return MaybeHandle<Object>();
+  return isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// static
+MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
+                                       Handle<Object> callable) {
+  if (FLAG_harmony_instanceof) {
+    // The {callable} must be a receiver.
+    if (!callable->IsJSReceiver()) {
+      THROW_NEW_ERROR(
+          isolate, NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck),
+          Object);
+    }
+
+    // Lookup the @@hasInstance method on {callable}.
+    Handle<Object> inst_of_handler;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, inst_of_handler,
+        JSReceiver::GetMethod(Handle<JSReceiver>::cast(callable),
+                              isolate->factory()->has_instance_symbol()),
+        Object);
+    if (!inst_of_handler->IsUndefined()) {
+      // Call the {inst_of_handler} on the {callable}.
+      Handle<Object> result;
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate, result,
+          Execution::Call(isolate, inst_of_handler, callable, 1, &object),
+          Object);
+      return isolate->factory()->ToBoolean(result->BooleanValue());
+    }
+  }
+
+  // The {callable} must have a [[Call]] internal method.
+  if (!callable->IsCallable()) {
+    THROW_NEW_ERROR(
+        isolate, NewTypeError(MessageTemplate::kNonCallableInInstanceOfCheck),
+        Object);
+  }
+
+  // Fall back to OrdinaryHasInstance with {callable} and {object}.
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, result,
+      JSReceiver::OrdinaryHasInstance(isolate, callable, object), Object);
+  return result;
+}
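
Stripped of bound functions, proxies, and error handling, OrdinaryHasInstance bottoms out in a prototype-chain membership test. A standalone toy version over plain structs (not V8 objects) shows the core loop:

    #include <cstdio>

    struct Obj {
      const Obj* proto;
    };

    // Does |prototype| occur anywhere in |object|'s prototype chain?
    bool HasInPrototypeChain(const Obj& object, const Obj& prototype) {
      for (const Obj* p = object.proto; p != nullptr; p = p->proto) {
        if (p == &prototype) return true;
      }
      return false;
    }

    int main() {
      Obj base{nullptr};
      Obj derived{&base};
      Obj instance{&derived};
      std::printf("%d\n", HasInPrototypeChain(instance, base));  // 1
      std::printf("%d\n", HasInPrototypeChain(base, instance));  // 0
    }
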
 
 Maybe<bool> Object::IsArray(Handle<Object> object) {
   if (object->IsJSArray()) return Just(true);
@@ -594,7 +679,7 @@
   auto isolate = js_object->GetIsolate();
   // TODO(dcarney): this should just be read from the symbol registry so as not
   // to be context dependent.
-  auto key = isolate->factory()->promise_status_symbol();
+  auto key = isolate->factory()->promise_state_symbol();
   // Shouldn't be possible to throw here.
   return JSObject::HasRealNamedProperty(js_object, key).FromJust();
 }
@@ -762,17 +847,6 @@
 }
 
 
-#define STACK_CHECK(result_value)                        \
-  do {                                                   \
-    StackLimitCheck stack_check(isolate);                \
-    if (stack_check.HasOverflowed()) {                   \
-      isolate->Throw(*isolate->factory()->NewRangeError( \
-          MessageTemplate::kStackOverflow));             \
-      return result_value;                               \
-    }                                                    \
-  } while (false)
-
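
The macro is not gone: the call sites below now pass the isolate explicitly, which implies the definition moved to a shared header. A self-contained sketch of the pattern under that assumption, with a toy isolate type standing in for the real StackLimitCheck machinery:

    #include <cstdint>

    struct ToyIsolate {
      uintptr_t stack_limit;
      bool HasOverflowed() const {
        int probe;  // its address approximates the current stack position
        return reinterpret_cast<uintptr_t>(&probe) < stack_limit;
      }
      void ThrowStackOverflow() { /* record a pending RangeError */ }
    };

    #define TOY_STACK_CHECK(isolate, result_value) \
      do {                                         \
        if ((isolate)->HasOverflowed()) {          \
          (isolate)->ThrowStackOverflow();         \
          return (result_value);                   \
        }                                          \
      } while (false)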
-
 // static
 MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
                                          Handle<JSProxy> proxy,
@@ -788,7 +862,7 @@
   }
 
   DCHECK(!name->IsPrivate());
-  STACK_CHECK(MaybeHandle<Object>());
+  STACK_CHECK(isolate, MaybeHandle<Object>());
   Handle<Name> trap_name = isolate->factory()->get_string();
   // 1. Assert: IsPropertyKey(P) is true.
   // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
@@ -1001,7 +1075,7 @@
   Isolate* isolate = proxy->GetIsolate();
   Handle<String> trap_name = isolate->factory()->getPrototypeOf_string();
 
-  STACK_CHECK(MaybeHandle<Object>());
+  STACK_CHECK(isolate, MaybeHandle<Object>());
 
   // 1. Let handler be the value of the [[ProxyHandler]] internal slot.
   // 2. If handler is null, throw a TypeError exception.
@@ -1117,6 +1191,20 @@
   return ReadAbsentProperty(isolate, receiver, it->GetName());
 }
 
+// static
+Address AccessorInfo::redirect(Isolate* isolate, Address address,
+                               AccessorComponent component) {
+  ApiFunction fun(address);
+  DCHECK_EQ(ACCESSOR_GETTER, component);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  return ExternalReference(&fun, type, isolate).address();
+}
+
+Address AccessorInfo::redirected_getter() const {
+  Address accessor = v8::ToCData<Address>(getter());
+  if (accessor == nullptr) return nullptr;
+  return redirect(GetIsolate(), accessor, ACCESSOR_GETTER);
+}
 
 bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
                                            Handle<AccessorInfo> info,
@@ -1383,6 +1471,7 @@
   }
 }
 
+// static
 Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
                                             Handle<JSReceiver> object,
                                             Handle<Object> proto) {
@@ -1396,7 +1485,6 @@
   }
 }
 
-
 Map* Object::GetRootMap(Isolate* isolate) {
   DisallowHeapAllocation no_alloc;
   if (IsSmi()) {
@@ -1555,7 +1643,6 @@
 
 MaybeHandle<Object> Object::ArraySpeciesConstructor(
     Isolate* isolate, Handle<Object> original_array) {
-  Handle<Context> native_context = isolate->native_context();
   Handle<Object> default_species = isolate->array_function();
   if (!FLAG_harmony_species) {
     return default_species;
@@ -1580,7 +1667,7 @@
           isolate, constructor_context,
           JSReceiver::GetFunctionRealm(Handle<JSReceiver>::cast(constructor)),
           Object);
-      if (*constructor_context != *native_context &&
+      if (*constructor_context != *isolate->native_context() &&
           *constructor == constructor_context->array_function()) {
         constructor = isolate->factory()->undefined_value();
       }
@@ -1905,15 +1992,7 @@
     }
     case JS_BOUND_FUNCTION_TYPE: {
       JSBoundFunction* bound_function = JSBoundFunction::cast(this);
-      Object* name = bound_function->name();
       accumulator->Add("<JS BoundFunction");
-      if (name->IsString()) {
-        String* str = String::cast(name);
-        if (str->length() > 0) {
-          accumulator->Add(" ");
-          accumulator->Put(str);
-        }
-      }
       accumulator->Add(
           " (BoundTargetFunction %p)>",
           reinterpret_cast<void*>(bound_function->bound_target_function()));
@@ -1946,6 +2025,18 @@
       if (!printed) {
         accumulator->Add("<JS Function");
       }
+      if (FLAG_trace_file_names) {
+        Object* source_name =
+            Script::cast(function->shared()->script())->name();
+        if (source_name->IsString()) {
+          String* str = String::cast(source_name);
+          if (str->length() > 0) {
+            accumulator->Add(" <");
+            accumulator->Put(str);
+            accumulator->Add(">");
+          }
+        }
+      }
       accumulator->Add(" (SharedFunctionInfo %p)",
                        reinterpret_cast<void*>(function->shared()));
       accumulator->Put('>');
@@ -2227,7 +2318,9 @@
       } else if (IsFalse()) {
         os << "<false>";
       } else {
-        os << "<Odd Oddball>";
+        os << "<Odd Oddball: ";
+        os << Oddball::cast(this)->to_string()->ToCString().get();
+        os << ">";
       }
       break;
     }
@@ -2644,24 +2737,6 @@
 }
 
 
-MaybeHandle<Object> JSObject::EnqueueChangeRecord(Handle<JSObject> object,
-                                                  const char* type_str,
-                                                  Handle<Name> name,
-                                                  Handle<Object> old_value) {
-  DCHECK(!object->IsJSGlobalProxy());
-  DCHECK(!object->IsJSGlobalObject());
-  Isolate* isolate = object->GetIsolate();
-  HandleScope scope(isolate);
-  Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str);
-  Handle<Object> args[] = { type, object, name, old_value };
-  int argc = name.is_null() ? 2 : old_value->IsTheHole() ? 3 : 4;
-
-  return Execution::Call(isolate,
-                         Handle<JSFunction>(isolate->observers_notify_change()),
-                         isolate->factory()->undefined_value(), argc, args);
-}
-
-
 const char* Representation::Mnemonic() const {
   switch (kind_) {
     case kNone: return "v";
@@ -2677,6 +2752,16 @@
   }
 }
 
+bool Map::InstancesNeedRewriting(Map* target) {
+  int target_number_of_fields = target->NumberOfFields();
+  int target_inobject = target->GetInObjectProperties();
+  int target_unused = target->unused_property_fields();
+  int old_number_of_fields;
+
+  return InstancesNeedRewriting(target, target_number_of_fields,
+                                target_inobject, target_unused,
+                                &old_number_of_fields);
+}
 
 bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
                                  int target_inobject, int target_unused,
@@ -3137,10 +3222,10 @@
   return result;
 }
 
-
 Handle<Map> Map::CopyGeneralizeAllRepresentations(
-    Handle<Map> map, int modify_index, StoreMode store_mode, PropertyKind kind,
-    PropertyAttributes attributes, const char* reason) {
+    Handle<Map> map, ElementsKind elements_kind, int modify_index,
+    StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
+    const char* reason) {
   Isolate* isolate = map->GetIsolate();
   Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -3196,6 +3281,7 @@
           MaybeHandle<Object>());
     }
   }
+  new_map->set_elements_kind(elements_kind);
   return new_map;
 }
 
@@ -3448,9 +3534,9 @@
   }
 }
 
-
-// Reconfigures property at |modify_index| with |new_kind|, |new_attributes|,
-// |store_mode| and/or |new_representation|/|new_field_type|.
+// Reconfigures elements kind to |new_elements_kind| and/or property at
+// |modify_index| with |new_kind|, |new_attributes|, |store_mode| and/or
+// |new_representation|/|new_field_type|.
 // If |modify_index| is negative then no properties are reconfigured but the
 // map is migrated to the up-to-date non-deprecated state.
 //
@@ -3460,6 +3546,7 @@
 // any potential new (partial) version of the type in the transition tree.
 // To do this, on each rewrite:
 // - Search the root of the transition tree using FindRootMap.
+// - Find/create a |root_map| with requested |new_elements_kind|.
 // - Find |target_map|, the newest matching version of this map using the
 //   virtually "enhanced" |old_map|'s descriptor array (i.e. whose entry at
 //   |modify_index| is considered to be of |new_kind| and having
@@ -3475,12 +3562,13 @@
 //   Return it.
 // - Otherwise, invalidate the outdated transition target from |target_map|, and
 //   replace its transition tree with a new branch for the updated descriptors.
-Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
-                                     PropertyKind new_kind,
-                                     PropertyAttributes new_attributes,
-                                     Representation new_representation,
-                                     Handle<FieldType> new_field_type,
-                                     StoreMode store_mode) {
+Handle<Map> Map::Reconfigure(Handle<Map> old_map,
+                             ElementsKind new_elements_kind, int modify_index,
+                             PropertyKind new_kind,
+                             PropertyAttributes new_attributes,
+                             Representation new_representation,
+                             Handle<FieldType> new_field_type,
+                             StoreMode store_mode) {
   DCHECK_NE(kAccessor, new_kind);  // TODO(ishell): not supported yet.
   DCHECK(store_mode != FORCE_FIELD || modify_index >= 0);
   Isolate* isolate = old_map->GetIsolate();
@@ -3495,7 +3583,8 @@
   // uninitialized value for representation None can be overwritten by both
   // smi and tagged values. Doubles, however, would require a box allocation.
   if (modify_index >= 0 && !new_representation.IsNone() &&
-      !new_representation.IsDouble()) {
+      !new_representation.IsDouble() &&
+      old_map->elements_kind() == new_elements_kind) {
     PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
     Representation old_representation = old_details.representation();
 
@@ -3528,38 +3617,39 @@
   // Check the state of the root map.
   Handle<Map> root_map(old_map->FindRootMap(), isolate);
   if (!old_map->EquivalentToForTransition(*root_map)) {
-    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                            new_kind, new_attributes,
-                                            "GenAll_NotEquivalent");
+    return CopyGeneralizeAllRepresentations(
+        old_map, new_elements_kind, modify_index, store_mode, new_kind,
+        new_attributes, "GenAll_NotEquivalent");
   }
 
   ElementsKind from_kind = root_map->elements_kind();
-  ElementsKind to_kind = old_map->elements_kind();
+  ElementsKind to_kind = new_elements_kind;
   // TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
   if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
+      to_kind != SLOW_STRING_WRAPPER_ELEMENTS &&
       to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
       !(IsTransitionableFastElementsKind(from_kind) &&
         IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
-    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                            new_kind, new_attributes,
-                                            "GenAll_InvalidElementsTransition");
+    return CopyGeneralizeAllRepresentations(
+        old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+        "GenAll_InvalidElementsTransition");
   }
   int root_nof = root_map->NumberOfOwnDescriptors();
   if (modify_index >= 0 && modify_index < root_nof) {
     PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
     if (old_details.kind() != new_kind ||
         old_details.attributes() != new_attributes) {
-      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                              new_kind, new_attributes,
-                                              "GenAll_RootModification1");
+      return CopyGeneralizeAllRepresentations(
+          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+          "GenAll_RootModification1");
     }
     if ((old_details.type() != DATA && store_mode == FORCE_FIELD) ||
         (old_details.type() == DATA &&
          (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
           !new_representation.fits_into(old_details.representation())))) {
-      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                              new_kind, new_attributes,
-                                              "GenAll_RootModification2");
+      return CopyGeneralizeAllRepresentations(
+          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+          "GenAll_RootModification2");
     }
   }
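
The from_kind/to_kind guard above only lets an elements kind move to a strictly more general fast kind (plus the listed dictionary-style exceptions). A toy model of that check over a hypothetical four-kind subset; V8's real sequence has more kinds and extra rules:

    // Ordered from most specific to most general (hypothetical subset).
    enum ToyElementsKind {
      TOY_FAST_SMI_ELEMENTS,
      TOY_FAST_HOLEY_SMI_ELEMENTS,
      TOY_FAST_ELEMENTS,
      TOY_FAST_HOLEY_ELEMENTS,
    };

    bool IsMoreGeneralTransition(ToyElementsKind from, ToyElementsKind to) {
      return to > from;  // later enum values are strictly more general
    }
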
 
@@ -3613,9 +3703,9 @@
     if (next_kind == kAccessor &&
         !EqualImmutableValues(old_descriptors->GetValue(i),
                               tmp_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                              new_kind, new_attributes,
-                                              "GenAll_Incompatible");
+      return CopyGeneralizeAllRepresentations(
+          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+          "GenAll_Incompatible");
     }
     if (next_location == kField && tmp_details.location() == kDescriptor) break;
 
@@ -3708,9 +3798,9 @@
     if (next_kind == kAccessor &&
         !EqualImmutableValues(old_descriptors->GetValue(i),
                               tmp_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                              new_kind, new_attributes,
-                                              "GenAll_Incompatible");
+      return CopyGeneralizeAllRepresentations(
+          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+          "GenAll_Incompatible");
     }
     DCHECK(!tmp_map->is_deprecated());
     target_map = tmp_map;
@@ -3941,9 +4031,9 @@
   // could be inserted regardless of whether transitions array is full or not.
   if (maybe_transition == NULL &&
       !TransitionArray::CanHaveMoreTransitions(split_map)) {
-    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                            new_kind, new_attributes,
-                                            "GenAll_CantHaveMoreTransitions");
+    return CopyGeneralizeAllRepresentations(
+        old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
+        "GenAll_CantHaveMoreTransitions");
   }
 
   old_map->NotifyLeafMapLayoutChange();
@@ -4024,18 +4114,27 @@
     if (root_map == NULL) return MaybeHandle<Map>();
     // From here on, use the map with correct elements kind as root map.
   }
-  int root_nof = root_map->NumberOfOwnDescriptors();
+  Map* new_map = root_map->TryReplayPropertyTransitions(*old_map);
+  if (new_map == nullptr) return MaybeHandle<Map>();
+  return handle(new_map);
+}
+
+Map* Map::TryReplayPropertyTransitions(Map* old_map) {
+  DisallowHeapAllocation no_allocation;
+  DisallowDeoptimization no_deoptimization(GetIsolate());
+
+  int root_nof = NumberOfOwnDescriptors();
 
   int old_nof = old_map->NumberOfOwnDescriptors();
   DescriptorArray* old_descriptors = old_map->instance_descriptors();
 
-  Map* new_map = root_map;
+  Map* new_map = this;
   for (int i = root_nof; i < old_nof; ++i) {
     PropertyDetails old_details = old_descriptors->GetDetails(i);
     Map* transition = TransitionArray::SearchTransition(
         new_map, old_details.kind(), old_descriptors->GetKey(i),
         old_details.attributes());
-    if (transition == NULL) return MaybeHandle<Map>();
+    if (transition == NULL) return nullptr;
     new_map = transition;
     DescriptorArray* new_descriptors = new_map->instance_descriptors();
 
@@ -4043,7 +4142,7 @@
     DCHECK_EQ(old_details.kind(), new_details.kind());
     DCHECK_EQ(old_details.attributes(), new_details.attributes());
     if (!old_details.representation().fits_into(new_details.representation())) {
-      return MaybeHandle<Map>();
+      return nullptr;
     }
     switch (new_details.type()) {
       case DATA: {
@@ -4051,20 +4150,20 @@
         // Cleared field types need special treatment. They represent lost
         // knowledge, so we must first generalize the new_type to "Any".
         if (FieldTypeIsCleared(new_details.representation(), new_type)) {
-          return MaybeHandle<Map>();
+          return nullptr;
         }
         PropertyType old_property_type = old_details.type();
         if (old_property_type == DATA) {
           FieldType* old_type = old_descriptors->GetFieldType(i);
           if (FieldTypeIsCleared(old_details.representation(), old_type) ||
               !old_type->NowIs(new_type)) {
-            return MaybeHandle<Map>();
+            return nullptr;
           }
         } else {
           DCHECK(old_property_type == DATA_CONSTANT);
           Object* old_value = old_descriptors->GetValue(i);
           if (!new_type->NowContains(old_value)) {
-            return MaybeHandle<Map>();
+            return nullptr;
           }
         }
         break;
@@ -4082,14 +4181,14 @@
         Object* old_value = old_descriptors->GetValue(i);
         Object* new_value = new_descriptors->GetValue(i);
         if (old_details.location() == kField || old_value != new_value) {
-          return MaybeHandle<Map>();
+          return nullptr;
         }
         break;
       }
     }
   }
-  if (new_map->NumberOfOwnDescriptors() != old_nof) return MaybeHandle<Map>();
-  return handle(new_map);
+  if (new_map->NumberOfOwnDescriptors() != old_nof) return nullptr;
+  return new_map;
 }
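
Rewriting the replay to raw Map pointers lets it run under DisallowHeapAllocation and report failure as nullptr instead of an empty MaybeHandle. The essence of the walk, in toy form with std::map standing in for V8's TransitionArray and all representation checks elided:

    #include <map>
    #include <string>
    #include <vector>

    struct ToyMap {
      std::map<std::string, ToyMap*> transitions;
    };

    // Follow the old map's property keys through |root|'s transition tree;
    // bail out with nullptr as soon as a step has no matching edge.
    ToyMap* TryReplay(ToyMap* root, const std::vector<std::string>& old_keys) {
      ToyMap* current = root;
      for (const std::string& key : old_keys) {
        auto it = current->transitions.find(key);
        if (it == current->transitions.end()) return nullptr;
        current = it->second;
      }
      return current;
    }
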
 
 
@@ -4166,6 +4265,7 @@
                                         LanguageMode language_mode,
                                         StoreFromKeyed store_mode,
                                         bool* found) {
+  it->UpdateProtector();
   DCHECK(it->IsFound());
   ShouldThrow should_throw =
       is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
@@ -4190,23 +4290,38 @@
         return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
                                     value, it->GetReceiver(), language_mode);
 
-      case LookupIterator::INTERCEPTOR:
+      case LookupIterator::INTERCEPTOR: {
+        Handle<Map> store_target_map =
+            handle(it->GetStoreTarget()->map(), it->isolate());
         if (it->HolderIsReceiverOrHiddenPrototype()) {
           Maybe<bool> result =
               JSObject::SetPropertyWithInterceptor(it, should_throw, value);
           if (result.IsNothing() || result.FromJust()) return result;
+          // The interceptor failed to set the property; verify that it
+          // did not silently change the store target's map either.
+          Utils::ApiCheck(*store_target_map == it->GetStoreTarget()->map(),
+                          it->IsElement() ? "v8::IndexedPropertySetterCallback"
+                                          : "v8::NamedPropertySetterCallback",
+                          "Interceptor silently changed store target.");
         } else {
           Maybe<PropertyAttributes> maybe_attributes =
               JSObject::GetPropertyAttributesWithInterceptor(it);
           if (!maybe_attributes.IsJust()) return Nothing<bool>();
-          if (maybe_attributes.FromJust() == ABSENT) break;
           if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
             return WriteToReadOnlyProperty(it, value, should_throw);
           }
+          // The interceptor failed to set the property; verify that it
+          // did not silently change the store target's map either.
+          Utils::ApiCheck(*store_target_map == it->GetStoreTarget()->map(),
+                          it->IsElement() ? "v8::IndexedPropertySetterCallback"
+                                          : "v8::NamedPropertySetterCallback",
+                          "Interceptor silently changed store target.");
+          if (maybe_attributes.FromJust() == ABSENT) break;
           *found = false;
           return Nothing<bool>();
         }
         break;
+      }
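
The new ApiCheck calls implement a snapshot-and-verify pattern: capture the store target's map before handing control to the embedder's interceptor, and fail hard if the callback reshaped the object while declining the store. In toy form, with an int standing in for the map pointer:

    #include <cassert>

    struct ToyObject {
      int map_id;  // stands in for the hidden-class (map) pointer
    };

    using Interceptor = bool (*)(ToyObject*);

    bool SetWithInterceptor(ToyObject* target, Interceptor interceptor) {
      const int map_before = target->map_id;  // snapshot the shape
      if (interceptor(target)) return true;   // interceptor handled the store
      // The interceptor declined the store, so it must not have silently
      // reshaped the target; V8 reports a violation via Utils::ApiCheck.
      assert(target->map_id == map_before &&
             "Interceptor silently changed store target.");
      return false;
    }
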
 
       case LookupIterator::ACCESSOR: {
         if (it->IsReadOnly()) {
@@ -4248,7 +4363,6 @@
 Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
                                 LanguageMode language_mode,
                                 StoreFromKeyed store_mode) {
-  it->UpdateProtector();
   if (it->IsFound()) {
     bool found = true;
     Maybe<bool> result =
@@ -4276,7 +4390,6 @@
                                      StoreFromKeyed store_mode) {
   Isolate* isolate = it->isolate();
 
-  it->UpdateProtector();
   if (it->IsFound()) {
     bool found = true;
     Maybe<bool> result =
@@ -4284,6 +4397,8 @@
     if (found) return result;
   }
 
+  it->UpdateProtector();
+
   // The property either doesn't exist on the holder or exists there as a data
   // property.
 
@@ -4295,7 +4410,7 @@
   }
   Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
 
-  LookupIterator::Configuration c = LookupIterator::HIDDEN;
+  LookupIterator::Configuration c = LookupIterator::OWN;
   LookupIterator own_lookup =
       it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
                       : LookupIterator(receiver, it->name(), c);
@@ -4419,14 +4534,6 @@
   // Store on the holder which may be hidden behind the receiver.
   DCHECK(it->HolderIsReceiverOrHiddenPrototype());
 
-  // Old value for the observation change record.
-  // Fetch before transforming the object since the encoding may become
-  // incompatible with what's cached in |it|.
-  bool is_observed = receiver->map()->is_observed() &&
-                     (it->IsElement() || !it->name()->IsPrivate());
-  MaybeHandle<Object> maybe_old;
-  if (is_observed) maybe_old = it->GetDataValue();
-
   Handle<Object> to_assign = value;
   // Convert the incoming value to a number for storing into typed arrays.
   if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
@@ -4449,15 +4556,6 @@
   // Write the property value.
   it->WriteDataValue(to_assign);
 
-  // Send the change record if there are observers.
-  if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) {
-    RETURN_ON_EXCEPTION_VALUE(
-        it->isolate(),
-        JSObject::EnqueueChangeRecord(receiver, "update", it->GetName(),
-                                      maybe_old.ToHandleChecked()),
-        Nothing<bool>());
-  }
-
 #if VERIFY_HEAP
   if (FLAG_verify_heap) {
     receiver->JSObjectVerify();
@@ -4467,47 +4565,6 @@
 }
 
 
-MUST_USE_RESULT static MaybeHandle<Object> BeginPerformSplice(
-    Handle<JSArray> object) {
-  Isolate* isolate = object->GetIsolate();
-  HandleScope scope(isolate);
-  Handle<Object> args[] = {object};
-
-  return Execution::Call(
-      isolate, Handle<JSFunction>(isolate->observers_begin_perform_splice()),
-      isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-
-MUST_USE_RESULT static MaybeHandle<Object> EndPerformSplice(
-    Handle<JSArray> object) {
-  Isolate* isolate = object->GetIsolate();
-  HandleScope scope(isolate);
-  Handle<Object> args[] = {object};
-
-  return Execution::Call(
-      isolate, Handle<JSFunction>(isolate->observers_end_perform_splice()),
-      isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-
-MUST_USE_RESULT static MaybeHandle<Object> EnqueueSpliceRecord(
-    Handle<JSArray> object, uint32_t index, Handle<JSArray> deleted,
-    uint32_t add_count) {
-  Isolate* isolate = object->GetIsolate();
-  HandleScope scope(isolate);
-  Handle<Object> index_object = isolate->factory()->NewNumberFromUint(index);
-  Handle<Object> add_count_object =
-      isolate->factory()->NewNumberFromUint(add_count);
-
-  Handle<Object> args[] = {object, index_object, deleted, add_count_object};
-
-  return Execution::Call(
-      isolate, Handle<JSFunction>(isolate->observers_enqueue_splice()),
-      isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-
 Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
                                     PropertyAttributes attributes,
                                     ShouldThrow should_throw,
@@ -4562,6 +4619,7 @@
     JSObject::ValidateElements(receiver);
     return result;
   } else {
+    it->UpdateProtector();
     // Migrate to the most up-to-date map that will be able to store |value|
     // under it->name() with |attributes|.
     it->PrepareTransitionToDataProperty(receiver, value, attributes,
@@ -4578,13 +4636,6 @@
       it->WriteDataValue(value);
     }
 
-    // Send the change record if there are observers.
-    if (receiver->map()->is_observed() && !it->name()->IsPrivate()) {
-      RETURN_ON_EXCEPTION_VALUE(isolate, JSObject::EnqueueChangeRecord(
-                                             receiver, "add", it->name(),
-                                             it->factory()->the_hole_value()),
-                                Nothing<bool>());
-    }
 #if VERIFY_HEAP
     if (FLAG_verify_heap) {
       receiver->JSObjectVerify();
@@ -4745,17 +4796,30 @@
   return false;
 }
 
+Map* Map::FindElementsKindTransitionedMap(MapHandleList* candidates) {
+  DisallowHeapAllocation no_allocation;
+  DisallowDeoptimization no_deoptimization(GetIsolate());
 
-Handle<Map> Map::FindTransitionedMap(Handle<Map> map,
-                                     MapHandleList* candidates) {
-  ElementsKind kind = map->elements_kind();
+  ElementsKind kind = elements_kind();
   bool packed = IsFastPackedElementsKind(kind);
 
   Map* transition = nullptr;
   if (IsTransitionableFastElementsKind(kind)) {
-    for (Map* current = map->ElementsTransitionMap();
-         current != nullptr && current->has_fast_elements();
-         current = current->ElementsTransitionMap()) {
+    // Check the state of the root map.
+    Map* root_map = FindRootMap();
+    if (!EquivalentToForTransition(root_map)) return nullptr;
+    root_map = root_map->LookupElementsTransitionMap(kind);
+    DCHECK_NOT_NULL(root_map);
+    // Starting from the next existing elements kind transition, try to
+    // replay the property transitions that do not involve instance rewriting
+    // (ElementsTransitionAndStoreStub does not support that).
+    for (root_map = root_map->ElementsTransitionMap();
+         root_map != nullptr && root_map->has_fast_elements();
+         root_map = root_map->ElementsTransitionMap()) {
+      Map* current = root_map->TryReplayPropertyTransitions(this);
+      if (current == nullptr) continue;
+      if (InstancesNeedRewriting(current)) continue;
+
       if (ContainsMap(candidates, current) &&
           (packed || !IsFastPackedElementsKind(current->elements_kind()))) {
         transition = current;
@@ -4763,11 +4827,14 @@
       }
     }
   }
-  return transition == nullptr ? Handle<Map>() : handle(transition);
+  return transition;
 }
 
 
 static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
+  // Ensure we were asked to search for an elements kind transition
+  // "near the root" of the transition tree.
+  DCHECK_EQ(map->FindRootMap()->NumberOfOwnDescriptors(),
+            map->NumberOfOwnDescriptors());
   Map* current_map = map;
 
   ElementsKind kind = map->elements_kind();
@@ -4896,7 +4963,7 @@
     return Map::CopyAsElementsKind(map, to_kind, OMIT_TRANSITION);
   }
 
-  return Map::AsElementsKind(map, to_kind);
+  return Map::ReconfigureElementsKind(map, to_kind);
 }
 
 
@@ -4929,7 +4996,7 @@
 Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
                                  Handle<Name> name) {
   DCHECK(!name->IsPrivate());
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
   // 1. (Assert)
   // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
   Handle<Object> handler(proxy->handler(), isolate);
@@ -4998,7 +5065,7 @@
                                  LanguageMode language_mode) {
   DCHECK(!name->IsPrivate());
   Isolate* isolate = proxy->GetIsolate();
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
   Factory* factory = isolate->factory();
   Handle<String> trap_name = factory->set_string();
   ShouldThrow should_throw =
@@ -5069,7 +5136,7 @@
   ShouldThrow should_throw =
       is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
   Isolate* isolate = proxy->GetIsolate();
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
   Factory* factory = isolate->factory();
   Handle<String> trap_name = factory->deleteProperty_string();
 
@@ -5160,6 +5227,44 @@
       handle(function->bound_target_function()));
 }
 
+// static
+MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
+                                             Handle<JSBoundFunction> function) {
+  Handle<String> prefix = isolate->factory()->bound__string();
+  if (!function->bound_target_function()->IsJSFunction()) return prefix;
+  Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
+                            isolate);
+  Handle<Object> target_name = JSFunction::GetName(isolate, target);
+  if (!target_name->IsString()) return prefix;
+  Factory* factory = isolate->factory();
+  return factory->NewConsString(prefix, Handle<String>::cast(target_name));
+}
+
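
JSBoundFunction::GetName composes the ES2015 name lazily: just the "bound " prefix when the target is not a plain function, otherwise prefix plus target name joined as a cons string. A toy equivalent over std::string:

    #include <string>

    // |target_name| is null when the bound target is not a plain function.
    std::string GetBoundFunctionName(const std::string* target_name) {
      const std::string prefix = "bound ";  // the factory's bound__string above
      if (target_name == nullptr) return prefix;
      return prefix + *target_name;
    }
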
+// static
+Handle<Object> JSFunction::GetName(Isolate* isolate,
+                                   Handle<JSFunction> function) {
+  if (function->shared()->name_should_print_as_anonymous()) {
+    return isolate->factory()->anonymous_string();
+  }
+  return handle(function->shared()->name(), isolate);
+}
+
+// static
+MaybeHandle<Smi> JSFunction::GetLength(Isolate* isolate,
+                                       Handle<JSFunction> function) {
+  int length = 0;
+  if (function->shared()->is_compiled()) {
+    length = function->shared()->length();
+  } else {
+    // If the function isn't compiled yet, the length is not computed
+    // correctly yet. Compile it now and return the right length.
+    if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
+      length = function->shared()->length();
+    }
+    if (isolate->has_pending_exception()) return MaybeHandle<Smi>();
+  }
+  return handle(Smi::FromInt(length), isolate);
+}
 
 // static
 Handle<Context> JSFunction::GetFunctionRealm(Handle<JSFunction> function) {
@@ -5220,7 +5325,7 @@
     } else {
       TransitionElementsKind(object, to_kind);
     }
-    map = Map::AsElementsKind(map, to_kind);
+    map = Map::ReconfigureElementsKind(map, to_kind);
   }
   JSObject::MigrateToMap(object, map);
 }
@@ -5302,8 +5407,6 @@
     ShouldThrow should_throw, AccessorInfoHandling handling) {
   it->UpdateProtector();
   Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
-  bool is_observed = object->map()->is_observed() &&
-                     (it->IsElement() || !it->name()->IsPrivate());
 
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
@@ -5363,14 +5466,6 @@
           it->ReconfigureDataProperty(value, attributes);
         }
 
-        if (is_observed) {
-          RETURN_ON_EXCEPTION_VALUE(
-              it->isolate(),
-              EnqueueChangeRecord(object, "reconfigure", it->GetName(),
-                                  it->factory()->the_hole_value()),
-              Nothing<bool>());
-        }
-
         return Just(true);
       }
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -5391,20 +5486,8 @@
         }
 
         // Reconfigure the data property if the attributes mismatch.
-        Handle<Object> old_value = it->factory()->the_hole_value();
-        if (is_observed) old_value = it->GetDataValue();
-
         it->ReconfigureDataProperty(value, attributes);
 
-        if (is_observed) {
-          if (old_value->SameValue(*value)) {
-            old_value = it->factory()->the_hole_value();
-          }
-          RETURN_ON_EXCEPTION_VALUE(
-              it->isolate(), EnqueueChangeRecord(object, "reconfigure",
-                                                 it->GetName(), old_value),
-              Nothing<bool>());
-        }
         return Just(true);
       }
     }
@@ -5893,162 +5976,6 @@
 }
 
 
-Object* JSObject::GetHiddenProperty(Handle<Name> key) {
-  DisallowHeapAllocation no_gc;
-  DCHECK(key->IsUniqueName());
-  if (IsJSGlobalProxy()) {
-    // For a proxy, use the prototype as target object.
-    PrototypeIterator iter(GetIsolate(), this);
-    // If the proxy is detached, return undefined.
-    if (iter.IsAtEnd()) return GetHeap()->the_hole_value();
-    DCHECK(iter.GetCurrent()->IsJSGlobalObject());
-    return iter.GetCurrent<JSObject>()->GetHiddenProperty(key);
-  }
-  DCHECK(!IsJSGlobalProxy());
-  Object* inline_value = GetHiddenPropertiesHashTable();
-
-  if (inline_value->IsUndefined()) return GetHeap()->the_hole_value();
-
-  ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
-  Object* entry = hashtable->Lookup(key);
-  return entry;
-}
-
-
-Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
-                                           Handle<Name> key,
-                                           Handle<Object> value) {
-  Isolate* isolate = object->GetIsolate();
-
-  DCHECK(key->IsUniqueName());
-  if (object->IsJSGlobalProxy()) {
-    // For a proxy, use the prototype as target object.
-    PrototypeIterator iter(isolate, object);
-    // If the proxy is detached, return undefined.
-    if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
-    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
-    return SetHiddenProperty(PrototypeIterator::GetCurrent<JSObject>(iter), key,
-                             value);
-  }
-  DCHECK(!object->IsJSGlobalProxy());
-
-  Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
-
-  Handle<ObjectHashTable> hashtable =
-      GetOrCreateHiddenPropertiesHashtable(object);
-
-  // If it was found, check if the key is already in the dictionary.
-  Handle<ObjectHashTable> new_table = ObjectHashTable::Put(hashtable, key,
-                                                           value);
-  if (*new_table != *hashtable) {
-    // If adding the key expanded the dictionary (i.e., Add returned a new
-    // dictionary), store it back to the object.
-    SetHiddenPropertiesHashTable(object, new_table);
-  }
-
-  // Return this to mark success.
-  return object;
-}
-
-
-void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
-  Isolate* isolate = object->GetIsolate();
-  DCHECK(key->IsUniqueName());
-
-  if (object->IsJSGlobalProxy()) {
-    PrototypeIterator iter(isolate, object);
-    if (iter.IsAtEnd()) return;
-    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
-    return DeleteHiddenProperty(PrototypeIterator::GetCurrent<JSObject>(iter),
-                                key);
-  }
-
-  Object* inline_value = object->GetHiddenPropertiesHashTable();
-
-  if (inline_value->IsUndefined()) return;
-
-  Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
-  bool was_present = false;
-  ObjectHashTable::Remove(hashtable, key, &was_present);
-}
-
-
-bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
-  Isolate* isolate = object->GetIsolate();
-  Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
-  LookupIterator it(object, hidden, object);
-  Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
-  // Cannot get an exception since the hidden_properties_symbol isn't exposed to
-  // JS.
-  DCHECK(maybe.IsJust());
-  return maybe.FromJust() != ABSENT;
-}
-
-
-Object* JSObject::GetHiddenPropertiesHashTable() {
-  DCHECK(!IsJSGlobalProxy());
-  if (HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden string. Since the
-    // hidden strings hash code is zero (and no other name has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = this->map()->instance_descriptors();
-    if (descriptors->number_of_descriptors() > 0) {
-      int sorted_index = descriptors->GetSortedKeyIndex(0);
-      if (descriptors->GetKey(sorted_index) ==
-              GetHeap()->hidden_properties_symbol() &&
-          sorted_index < map()->NumberOfOwnDescriptors()) {
-        DCHECK(descriptors->GetType(sorted_index) == DATA);
-        DCHECK(descriptors->GetDetails(sorted_index).representation().
-               IsCompatibleForLoad(Representation::Tagged()));
-        FieldIndex index = FieldIndex::ForDescriptor(this->map(),
-                                                     sorted_index);
-        return this->RawFastPropertyAt(index);
-      } else {
-        return GetHeap()->undefined_value();
-      }
-    } else {
-      return GetHeap()->undefined_value();
-    }
-  } else {
-    Isolate* isolate = GetIsolate();
-    Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
-    Handle<JSObject> receiver(this, isolate);
-    LookupIterator it(receiver, hidden, receiver);
-    // Access check is always skipped for the hidden string anyways.
-    return *GetDataProperty(&it);
-  }
-}
-
-Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
-    Handle<JSObject> object) {
-  Isolate* isolate = object->GetIsolate();
-
-  static const int kInitialCapacity = 4;
-  Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
-  if (inline_value->IsHashTable()) {
-    return Handle<ObjectHashTable>::cast(inline_value);
-  }
-
-  Handle<ObjectHashTable> hashtable = ObjectHashTable::New(
-      isolate, kInitialCapacity, USE_CUSTOM_MINIMUM_CAPACITY);
-
-  DCHECK(inline_value->IsUndefined());
-  SetHiddenPropertiesHashTable(object, hashtable);
-  return hashtable;
-}
-
-
-Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
-                                                      Handle<Object> value) {
-  DCHECK(!object->IsJSGlobalProxy());
-  Isolate* isolate = object->GetIsolate();
-  Handle<Symbol> name = isolate->factory()->hidden_properties_symbol();
-  SetOwnPropertyIgnoreAttributes(object, name, value, DONT_ENUM).Assert();
-  return object;
-}
-
-
 Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
                                                     ShouldThrow should_throw) {
   Isolate* isolate = it->isolate();
@@ -6145,11 +6072,6 @@
   }
   Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
 
-  bool is_observed = receiver->map()->is_observed() &&
-                     (it->IsElement() || !it->name()->IsPrivate());
-
-  Handle<Object> old_value = it->factory()->the_hole_value();
-
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
       case LookupIterator::JSPROXY:
@@ -6177,10 +6099,6 @@
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
         return Just(true);
       case LookupIterator::DATA:
-        if (is_observed) {
-          old_value = it->GetDataValue();
-        }
-      // Fall through.
       case LookupIterator::ACCESSOR: {
         if (!it->IsConfigurable()) {
           // Fail if the property is not configurable.
@@ -6195,13 +6113,6 @@
 
         it->Delete();
 
-        if (is_observed) {
-          RETURN_ON_EXCEPTION_VALUE(
-              isolate, JSObject::EnqueueChangeRecord(receiver, "delete",
-                                                     it->GetName(), old_value),
-              Nothing<bool>());
-        }
-
         return Just(true);
       }
     }
@@ -6214,7 +6125,7 @@
 Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
                                       LanguageMode language_mode) {
   LookupIterator it(object->GetIsolate(), object, index, object,
-                    LookupIterator::HIDDEN);
+                    LookupIterator::OWN);
   return DeleteProperty(&it, language_mode);
 }
 
@@ -6222,7 +6133,7 @@
 Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
                                        Handle<Name> name,
                                        LanguageMode language_mode) {
-  LookupIterator it(object, name, object, LookupIterator::HIDDEN);
+  LookupIterator it(object, name, object, LookupIterator::OWN);
   return DeleteProperty(&it, language_mode);
 }
 
@@ -6231,13 +6142,15 @@
                                                 Handle<Name> name,
                                                 LanguageMode language_mode) {
   LookupIterator it = LookupIterator::PropertyOrElement(
-      name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
+      name->GetIsolate(), object, name, object, LookupIterator::OWN);
   return DeleteProperty(&it, language_mode);
 }
 
 
 // ES6 7.1.14
-MaybeHandle<Object> ToPropertyKey(Isolate* isolate, Handle<Object> value) {
+// static
+MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
+                                          Handle<Object> value) {
   // 1. Let key be ToPrimitive(argument, hint String).
   MaybeHandle<Object> maybe_key =
       Object::ToPrimitive(value, ToPrimitiveHint::kString);
@@ -6330,7 +6243,7 @@
     // 7b. ReturnIfAbrupt(propDesc).
     bool success = false;
     LookupIterator it = LookupIterator::PropertyOrElement(
-        isolate, props, next_key, &success, LookupIterator::HIDDEN);
+        isolate, props, next_key, &success, LookupIterator::OWN);
     DCHECK(success);
     Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
     if (!maybe.IsJust()) return MaybeHandle<Object>();
@@ -6405,7 +6318,7 @@
   bool success = false;
   DCHECK(key->IsName() || key->IsNumber());  // |key| is a PropertyKey...
   LookupIterator it = LookupIterator::PropertyOrElement(
-      isolate, object, key, &success, LookupIterator::HIDDEN);
+      isolate, object, key, &success, LookupIterator::OWN);
   DCHECK(success);  // ...so creating a LookupIterator can't fail.
 
   // Deal with access checks first.
@@ -6950,10 +6863,7 @@
     // (Not needed.)
   }
   // Most of steps 16 through 19 is implemented by JSArray::SetLength.
-  if (JSArray::ObservableSetLength(a, new_len).is_null()) {
-    DCHECK(isolate->has_pending_exception());
-    return Nothing<bool>();
-  }
+  JSArray::SetLength(a, new_len);
   // Steps 19d-ii, 20.
   if (!new_writable) {
     PropertyDescriptor readonly;
@@ -6985,7 +6895,7 @@
                                        Handle<Object> key,
                                        PropertyDescriptor* desc,
                                        ShouldThrow should_throw) {
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
   if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
     return SetPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
                               should_throw);
@@ -7135,7 +7045,7 @@
   bool success = false;
   DCHECK(key->IsName() || key->IsNumber());  // |key| is a PropertyKey...
   LookupIterator it = LookupIterator::PropertyOrElement(
-      isolate, object, key, &success, LookupIterator::HIDDEN);
+      isolate, object, key, &success, LookupIterator::OWN);
   DCHECK(success);  // ...so creating a LookupIterator can't fail.
   return GetOwnPropertyDescriptor(&it, desc);
 }
@@ -7206,7 +7116,7 @@
                                               Handle<Name> name,
                                               PropertyDescriptor* desc) {
   DCHECK(!name->IsPrivate());
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
 
   Handle<String> trap_name =
       isolate->factory()->getOwnPropertyDescriptor_string();
@@ -7460,8 +7370,7 @@
 
   if (receiver->IsJSObject()) {
     Handle<JSObject> object = Handle<JSObject>::cast(receiver);
-    if (!object->HasSloppyArgumentsElements() &&
-        !object->map()->is_observed()) {  // Fast path.
+    if (!object->HasSloppyArgumentsElements()) {  // Fast path.
       if (level == SEALED) {
         return JSObject::PreventExtensionsWithTransition<SEALED>(object,
                                                                  should_throw);
@@ -7565,7 +7474,7 @@
 Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
                                        ShouldThrow should_throw) {
   Isolate* isolate = proxy->GetIsolate();
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
   Factory* factory = isolate->factory();
   Handle<String> trap_name = factory->preventExtensions_string();
 
@@ -7612,7 +7521,7 @@
                                         ShouldThrow should_throw) {
   Isolate* isolate = object->GetIsolate();
 
-  if (!object->HasSloppyArgumentsElements() && !object->map()->is_observed()) {
+  if (!object->HasSloppyArgumentsElements()) {
     return PreventExtensionsWithTransition<NONE>(object, should_throw);
   }
 
@@ -7653,13 +7562,6 @@
   JSObject::MigrateToMap(object, new_map);
   DCHECK(!object->map()->is_extensible());
 
-  if (object->map()->is_observed()) {
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate,
-        EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
-                            isolate->factory()->the_hole_value()),
-        Nothing<bool>());
-  }
   return Just(true);
 }
 
@@ -7674,7 +7576,7 @@
 
 Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
   Isolate* isolate = proxy->GetIsolate();
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
   Factory* factory = isolate->factory();
   Handle<String> trap_name = factory->isExtensible_string();
 
@@ -7760,7 +7662,6 @@
 
   // Sealing/freezing sloppy arguments should be handled elsewhere.
   DCHECK(!object->HasSloppyArgumentsElements());
-  DCHECK(!object->map()->is_observed());
 
   Isolate* isolate = object->GetIsolate();
   if (object->IsAccessCheckNeeded() &&
@@ -7879,28 +7780,6 @@
 }
 
 
-void JSObject::SetObserved(Handle<JSObject> object) {
-  DCHECK(!object->IsJSGlobalProxy());
-  DCHECK(!object->IsJSGlobalObject());
-  Isolate* isolate = object->GetIsolate();
-  Handle<Map> new_map;
-  Handle<Map> old_map(object->map(), isolate);
-  DCHECK(!old_map->is_observed());
-  Map* transition = TransitionArray::SearchSpecial(
-      *old_map, isolate->heap()->observed_symbol());
-  if (transition != NULL) {
-    new_map = handle(transition, isolate);
-    DCHECK(new_map->is_observed());
-  } else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
-    new_map = Map::CopyForObserved(old_map);
-  } else {
-    new_map = Map::Copy(old_map, "SlowObserved");
-    new_map->set_is_observed();
-  }
-  JSObject::MigrateToMap(object, new_map);
-}
-
-
 Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
                                         Representation representation,
                                         FieldIndex index) {
@@ -8031,7 +7910,7 @@
           ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
       KeyAccumulator accumulator(isolate, OWN_ONLY, filter);
       accumulator.NextPrototype();
-      copy->CollectOwnPropertyNames(&accumulator, filter);
+      accumulator.CollectOwnPropertyNames(copy);
       Handle<FixedArray> names = accumulator.GetKeys();
       for (int i = 0; i < names->length(); i++) {
         DCHECK(names->get(i)->IsName());
@@ -8295,28 +8174,6 @@
   return true;
 }
 
-// Tests for the fast common case for property enumeration:
-// - This object and all its prototypes have an enum cache (which means that
-//   it is not a proxy, has no interceptors, and needs no access checks).
-// - This object has no elements.
-// - No prototype has enumerable properties/elements.
-bool JSReceiver::IsSimpleEnum() {
-  for (PrototypeIterator iter(GetIsolate(), this,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(); iter.Advance()) {
-    if (!iter.GetCurrent()->IsJSObject()) return false;
-    JSObject* current = iter.GetCurrent<JSObject>();
-    int enum_length = current->map()->EnumLength();
-    if (enum_length == kInvalidEnumCacheSentinel) return false;
-    if (current->IsAccessCheckNeeded()) return false;
-    DCHECK(!current->HasNamedInterceptor());
-    DCHECK(!current->HasIndexedInterceptor());
-    if (current->HasEnumerableElements()) return false;
-    if (current != this && enum_length != 0) return false;
-  }
-  return true;
-}
-
 
 int Map::NumberOfDescribedProperties(DescriptorFlag which,
                                      PropertyFilter filter) {
@@ -8350,23 +8207,6 @@
 }
 
 
-static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
-  int len = array->length();
-  for (int i = 0; i < len; i++) {
-    Object* e = array->get(i);
-    if (!(e->IsName() || e->IsNumber())) return false;
-  }
-  return true;
-}
-
-
-static Handle<FixedArray> ReduceFixedArrayTo(
-    Handle<FixedArray> array, int length) {
-  DCHECK_LE(length, array->length());
-  if (array->length() == length) return array;
-  return array->GetIsolate()->factory()->CopyFixedArrayUpTo(array, length);
-}
-
 bool Map::OnlyHasSimpleProperties() {
   // Wrapped string elements aren't explicitly stored in the elements backing
   // store, but are loaded indirectly from the underlying string.
@@ -8375,419 +8215,13 @@
          !has_hidden_prototype() && !is_dictionary_map();
 }
 
-// static
-Handle<FixedArray> JSObject::GetFastEnumPropertyKeys(Isolate* isolate,
-                                                     Handle<JSObject> object) {
-  Handle<Map> map(object->map());
-  bool cache_enum_length = map->OnlyHasSimpleProperties();
-
-  Handle<DescriptorArray> descs =
-      Handle<DescriptorArray>(map->instance_descriptors(), isolate);
-  int own_property_count = map->EnumLength();
-  // If the enum length of the given map is set to kInvalidEnumCacheSentinel,
-  // the map itself has never used the present enum cache. The first step to
-  // using the cache is to set the enum length of the map by counting the
-  // number of own descriptors that are ENUMERABLE_STRINGS.
-  if (own_property_count == kInvalidEnumCacheSentinel) {
-    own_property_count =
-        map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
-  } else {
-    DCHECK(
-        own_property_count ==
-        map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS));
-  }
-
-  if (descs->HasEnumCache()) {
-    Handle<FixedArray> keys(descs->GetEnumCache(), isolate);
-    // If the cache already holds at least as many keys as the enum requires,
-    // we can reuse it. Otherwise the enum cache was generated for a previous
-    // (smaller) version of the descriptor array, and we regenerate it.
-    if (own_property_count <= keys->length()) {
-      isolate->counters()->enum_cache_hits()->Increment();
-      if (cache_enum_length) map->SetEnumLength(own_property_count);
-      return ReduceFixedArrayTo(keys, own_property_count);
-    }
-  }
-
-  if (descs->IsEmpty()) {
-    isolate->counters()->enum_cache_hits()->Increment();
-    if (cache_enum_length) map->SetEnumLength(0);
-    return isolate->factory()->empty_fixed_array();
-  }
-
-  isolate->counters()->enum_cache_misses()->Increment();
-
-  Handle<FixedArray> storage =
-      isolate->factory()->NewFixedArray(own_property_count);
-  Handle<FixedArray> indices =
-      isolate->factory()->NewFixedArray(own_property_count);
-
-  int size = map->NumberOfOwnDescriptors();
-  int index = 0;
-
-  for (int i = 0; i < size; i++) {
-    PropertyDetails details = descs->GetDetails(i);
-    if (details.IsDontEnum()) continue;
-    Object* key = descs->GetKey(i);
-    if (key->IsSymbol()) continue;
-    storage->set(index, key);
-    if (!indices.is_null()) {
-      if (details.type() != DATA) {
-        indices = Handle<FixedArray>();
-      } else {
-        FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
-        int load_by_field_index = field_index.GetLoadByFieldIndex();
-        indices->set(index, Smi::FromInt(load_by_field_index));
-      }
-    }
-    index++;
-  }
-  DCHECK(index == storage->length());
-
-  DescriptorArray::SetEnumCache(descs, isolate, storage, indices);
-  if (cache_enum_length) {
-    map->SetEnumLength(own_property_count);
-  }
-  return storage;
-}
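The reuse rule implemented by the deleted helper can be stated compactly: a cache built for a larger descriptor array serves a smaller request by trimming, while a smaller cache must be rebuilt. A hypothetical standalone model (std::vector instead of FixedArray, not V8 API):

#include <string>
#include <vector>

// Illustrative model of the enum-cache reuse decision above.
std::vector<std::string> GetEnumKeys(std::vector<std::string>& cache,
                                     const std::vector<std::string>& all_keys,
                                     size_t own_property_count) {
  if (own_property_count <= cache.size()) {
    // Cache hit: reuse the cache, trimmed to the requested length.
    return std::vector<std::string>(cache.begin(),
                                    cache.begin() + own_property_count);
  }
  // Cache miss: regenerate the cache from the full key list.
  cache.assign(all_keys.begin(), all_keys.begin() + own_property_count);
  return cache;
}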
-
-
-Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
-  Isolate* isolate = object->GetIsolate();
-  if (object->HasFastProperties()) {
-    return GetFastEnumPropertyKeys(isolate, object);
-  } else if (object->IsJSGlobalObject()) {
-    Handle<GlobalDictionary> dictionary(object->global_dictionary());
-    int length = dictionary->NumberOfEnumElements();
-    if (length == 0) {
-      return isolate->factory()->empty_fixed_array();
-    }
-    Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
-    dictionary->CopyEnumKeysTo(*storage);
-    return storage;
-  } else {
-    Handle<NameDictionary> dictionary(object->property_dictionary());
-    int length = dictionary->NumberOfEnumElements();
-    if (length == 0) {
-      return isolate->factory()->empty_fixed_array();
-    }
-    Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
-    dictionary->CopyEnumKeysTo(*storage);
-    return storage;
-  }
-}
-
-
-enum IndexedOrNamed { kIndexed, kNamed };
-
-
-// Returns |true| on success, |nothing| on exception.
-template <class Callback, IndexedOrNamed type>
-static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
-                                          Handle<JSReceiver> receiver,
-                                          Handle<JSObject> object,
-                                          PropertyFilter filter,
-                                          KeyAccumulator* accumulator) {
-  if (type == kIndexed) {
-    if (!object->HasIndexedInterceptor()) return Just(true);
-  } else {
-    if (!object->HasNamedInterceptor()) return Just(true);
-  }
-  Handle<InterceptorInfo> interceptor(type == kIndexed
-                                          ? object->GetIndexedInterceptor()
-                                          : object->GetNamedInterceptor(),
-                                      isolate);
-  if ((filter & ONLY_ALL_CAN_READ) && !interceptor->all_can_read()) {
-    return Just(true);
-  }
-  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
-                                 *object, Object::DONT_THROW);
-  Handle<JSObject> result;
-  if (!interceptor->enumerator()->IsUndefined()) {
-    Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
-    const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
-                                           : "interceptor-named-enum";
-    LOG(isolate, ApiObjectAccess(log_tag, *object));
-    result = args.Call(enum_fun);
-  }
-  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
-  if (result.is_null()) return Just(true);
-  DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
-  // The accumulator takes care of string/symbol filtering.
-  if (type == kIndexed) {
-    accumulator->AddElementKeysFromInterceptor(result);
-  } else {
-    accumulator->AddKeys(result, DO_NOT_CONVERT);
-  }
-  return Just(true);
-}
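The deleted template used its IndexedOrNamed parameter for compile-time dispatch, letting the indexed and named interceptor paths share one body. A minimal standalone illustration of the same pattern (not V8 API):

#include <iostream>

enum IndexedOrNamed { kIndexed, kNamed };

// The template parameter selects the variant at instantiation time,
// so both paths share one body without a runtime flag at the call site.
template <IndexedOrNamed type>
const char* LogTag() {
  return type == kIndexed ? "interceptor-indexed-enum"
                          : "interceptor-named-enum";
}

int main() {
  std::cout << LogTag<kIndexed>() << "\n";  // interceptor-indexed-enum
  std::cout << LogTag<kNamed>() << "\n";    // interceptor-named-enum
  return 0;
}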
-
-
-// Returns |true| on success, |false| if prototype walking should be stopped,
-// |nothing| if an exception was thrown.
-static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
-                                       Handle<JSReceiver> receiver,
-                                       Handle<JSObject> object,
-                                       PropertyFilter* filter,
-                                       KeyCollectionType type,
-                                       KeyAccumulator* accumulator) {
-  accumulator->NextPrototype();
-  // Check access rights if required.
-  if (object->IsAccessCheckNeeded() &&
-      !isolate->MayAccess(handle(isolate->context()), object)) {
-    // The cross-origin spec says that [[Enumerate]] shall return an empty
-    // iterator when it doesn't have access...
-    if (type == INCLUDE_PROTOS) {
-      return Just(false);
-    }
-    // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
-    DCHECK_EQ(OWN_ONLY, type);
-    *filter = static_cast<PropertyFilter>(*filter | ONLY_ALL_CAN_READ);
-  }
-
-  JSObject::CollectOwnElementKeys(object, accumulator, *filter);
-
-  // Add the element keys from the interceptor.
-  Maybe<bool> success =
-      GetKeysFromInterceptor<v8::IndexedPropertyEnumeratorCallback, kIndexed>(
-          isolate, receiver, object, *filter, accumulator);
-  MAYBE_RETURN(success, Nothing<bool>());
-
-  if (*filter == ENUMERABLE_STRINGS) {
-    Handle<FixedArray> enum_keys = JSObject::GetEnumPropertyKeys(object);
-    accumulator->AddKeys(enum_keys, DO_NOT_CONVERT);
-  } else {
-    object->CollectOwnPropertyNames(accumulator, *filter);
-  }
-
-  // Add the property keys from the interceptor.
-  success = GetKeysFromInterceptor<v8::GenericNamedPropertyEnumeratorCallback,
-                                   kNamed>(isolate, receiver, object, *filter,
-                                           accumulator);
-  MAYBE_RETURN(success, Nothing<bool>());
-  return Just(true);
-}
-
-
-// Helper function for JSReceiver::GetKeys() below. Can be called recursively.
-// Returns |true| or |nothing|.
-static Maybe<bool> GetKeys_Internal(Isolate* isolate,
-                                    Handle<JSReceiver> receiver,
-                                    Handle<JSReceiver> object,
-                                    KeyCollectionType type,
-                                    PropertyFilter filter,
-                                    KeyAccumulator* accumulator) {
-  // Proxies have no hidden prototype and we should not trigger the
-  // [[GetPrototypeOf]] trap on the last iteration when using
-  // AdvanceFollowingProxies.
-  if (type == OWN_ONLY && object->IsJSProxy()) {
-    MAYBE_RETURN(JSProxy::OwnPropertyKeys(isolate, receiver,
-                                          Handle<JSProxy>::cast(object), filter,
-                                          accumulator),
-                 Nothing<bool>());
-    return Just(true);
-  }
-
-  PrototypeIterator::WhereToEnd end = type == OWN_ONLY
-                                          ? PrototypeIterator::END_AT_NON_HIDDEN
-                                          : PrototypeIterator::END_AT_NULL;
-  for (PrototypeIterator iter(isolate, object,
-                              PrototypeIterator::START_AT_RECEIVER, end);
-       !iter.IsAtEnd();) {
-    Handle<JSReceiver> current =
-        PrototypeIterator::GetCurrent<JSReceiver>(iter);
-    Maybe<bool> result = Just(false);  // Dummy initialization.
-    if (current->IsJSProxy()) {
-      result = JSProxy::OwnPropertyKeys(isolate, receiver,
-                                        Handle<JSProxy>::cast(current), filter,
-                                        accumulator);
-    } else {
-      DCHECK(current->IsJSObject());
-      result = GetKeysFromJSObject(isolate, receiver,
-                                   Handle<JSObject>::cast(current), &filter,
-                                   type, accumulator);
-    }
-    MAYBE_RETURN(result, Nothing<bool>());
-    if (!result.FromJust()) break;  // |false| means "stop iterating".
-    // Iterate through proxies but ignore access checks for the ALL_CAN_READ
-    // case on API objects for OWN_ONLY keys handled in GetKeysFromJSObject.
-    if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
-      return Nothing<bool>();
-    }
-  }
-  return Just(true);
-}
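The deleted walk visits the receiver and then each prototype in turn, and a collector result of |false| stops iteration while keeping what was already gathered. A hypothetical standalone model (plain structs, not V8 API):

#include <string>
#include <vector>

struct Obj {
  std::vector<std::string> own_keys;
  const Obj* prototype = nullptr;
  bool access_check_fails = false;
};

// Illustrative model of the prototype-chain key collection above: an
// access-check failure stops iteration (the Just(false) case) but keeps
// what was collected so far.
std::vector<std::string> CollectKeys(const Obj* receiver) {
  std::vector<std::string> keys;
  for (const Obj* current = receiver; current != nullptr;
       current = current->prototype) {
    if (current->access_check_fails) break;  // |false| means "stop iterating".
    keys.insert(keys.end(), current->own_keys.begin(),
                current->own_keys.end());
  }
  return keys;
}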
-
-
-// ES6 9.5.12
-// Returns |true| on success, |nothing| in case of exception.
-// static
-Maybe<bool> JSProxy::OwnPropertyKeys(Isolate* isolate,
-                                     Handle<JSReceiver> receiver,
-                                     Handle<JSProxy> proxy,
-                                     PropertyFilter filter,
-                                     KeyAccumulator* accumulator) {
-  STACK_CHECK(Nothing<bool>());
-  // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
-  Handle<Object> handler(proxy->handler(), isolate);
-  // 2. If handler is null, throw a TypeError exception.
-  // 3. Assert: Type(handler) is Object.
-  if (proxy->IsRevoked()) {
-    isolate->Throw(*isolate->factory()->NewTypeError(
-        MessageTemplate::kProxyRevoked, isolate->factory()->ownKeys_string()));
-    return Nothing<bool>();
-  }
-  // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
-  Handle<JSReceiver> target(proxy->target(), isolate);
-  // 5. Let trap be ? GetMethod(handler, "ownKeys").
-  Handle<Object> trap;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
-                                       isolate->factory()->ownKeys_string()),
-      Nothing<bool>());
-  // 6. If trap is undefined, then
-  if (trap->IsUndefined()) {
-    // 6a. Return target.[[OwnPropertyKeys]]().
-    return GetKeys_Internal(isolate, receiver, target, OWN_ONLY, filter,
-                            accumulator);
-  }
-  // 7. Let trapResultArray be Call(trap, handler, «target»).
-  Handle<Object> trap_result_array;
-  Handle<Object> args[] = {target};
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, trap_result_array,
-      Execution::Call(isolate, trap, handler, arraysize(args), args),
-      Nothing<bool>());
-  // 8. Let trapResult be ? CreateListFromArrayLike(trapResultArray,
-  //    «String, Symbol»).
-  Handle<FixedArray> trap_result;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, trap_result,
-      Object::CreateListFromArrayLike(isolate, trap_result_array,
-                                      ElementTypes::kStringAndSymbol),
-      Nothing<bool>());
-  // 9. Let extensibleTarget be ? IsExtensible(target).
-  Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
-  MAYBE_RETURN(maybe_extensible, Nothing<bool>());
-  bool extensible_target = maybe_extensible.FromJust();
-  // 10. Let targetKeys be ? target.[[OwnPropertyKeys]]().
-  Handle<FixedArray> target_keys;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_keys,
-                                   JSReceiver::OwnPropertyKeys(target),
-                                   Nothing<bool>());
-  // 11. (Assert)
-  // 12. Let targetConfigurableKeys be an empty List.
-  // To save memory, we're re-using target_keys and will modify it in-place.
-  Handle<FixedArray> target_configurable_keys = target_keys;
-  // 13. Let targetNonconfigurableKeys be an empty List.
-  Handle<FixedArray> target_nonconfigurable_keys =
-      isolate->factory()->NewFixedArray(target_keys->length());
-  int nonconfigurable_keys_length = 0;
-  // 14. Repeat, for each element key of targetKeys:
-  for (int i = 0; i < target_keys->length(); ++i) {
-    // 14a. Let desc be ? target.[[GetOwnProperty]](key).
-    PropertyDescriptor desc;
-    Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
-        isolate, target, handle(target_keys->get(i), isolate), &desc);
-    MAYBE_RETURN(found, Nothing<bool>());
-    // 14b. If desc is not undefined and desc.[[Configurable]] is false, then
-    if (found.FromJust() && !desc.configurable()) {
-      // 14b i. Append key as an element of targetNonconfigurableKeys.
-      target_nonconfigurable_keys->set(nonconfigurable_keys_length,
-                                       target_keys->get(i));
-      nonconfigurable_keys_length++;
-      // The key was moved; zap it in the original list.
-      target_keys->set(i, Smi::FromInt(0));
-    } else {
-      // 14c. Else,
-      // 14c i. Append key as an element of targetConfigurableKeys.
-      // (No-op, just keep it in |target_keys|.)
-    }
-  }
-  accumulator->NextPrototype();  // Prepare for accumulating keys.
-  // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
-  //     then:
-  if (extensible_target && nonconfigurable_keys_length == 0) {
-    // 15a. Return trapResult.
-    return accumulator->AddKeysFromProxy(proxy, trap_result);
-  }
-  // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
-  Zone set_zone(isolate->allocator());
-  const int kPresent = 1;
-  const int kGone = 0;
-  IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
-  int unchecked_result_keys_size = 0;
-  for (int i = 0; i < trap_result->length(); ++i) {
-    DCHECK(trap_result->get(i)->IsUniqueName());
-    Object* key = trap_result->get(i);
-    int* entry = unchecked_result_keys.Get(key);
-    if (*entry != kPresent) {
-      *entry = kPresent;
-      unchecked_result_keys_size++;
-    }
-  }
-  // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
-  for (int i = 0; i < nonconfigurable_keys_length; ++i) {
-    Object* key = target_nonconfigurable_keys->get(i);
-    // 17a. If key is not an element of uncheckedResultKeys, throw a
-    //      TypeError exception.
-    int* found = unchecked_result_keys.Find(key);
-    if (found == nullptr || *found == kGone) {
-      isolate->Throw(*isolate->factory()->NewTypeError(
-          MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
-      return Nothing<bool>();
-    }
-    // 17b. Remove key from uncheckedResultKeys.
-    *found = kGone;
-    unchecked_result_keys_size--;
-  }
-  // 18. If extensibleTarget is true, return trapResult.
-  if (extensible_target) {
-    return accumulator->AddKeysFromProxy(proxy, trap_result);
-  }
-  // 19. Repeat, for each key that is an element of targetConfigurableKeys:
-  for (int i = 0; i < target_configurable_keys->length(); ++i) {
-    Object* key = target_configurable_keys->get(i);
-    if (key->IsSmi()) continue;  // Zapped entry, was nonconfigurable.
-    // 19a. If key is not an element of uncheckedResultKeys, throw a
-    //      TypeError exception.
-    int* found = unchecked_result_keys.Find(key);
-    if (found == nullptr || *found == kGone) {
-      isolate->Throw(*isolate->factory()->NewTypeError(
-          MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
-      return Nothing<bool>();
-    }
-    // 19b. Remove key from uncheckedResultKeys.
-    *found = kGone;
-    unchecked_result_keys_size--;
-  }
-  // 20. If uncheckedResultKeys is not empty, throw a TypeError exception.
-  if (unchecked_result_keys_size != 0) {
-    DCHECK_GT(unchecked_result_keys_size, 0);
-    isolate->Throw(*isolate->factory()->NewTypeError(
-        MessageTemplate::kProxyOwnKeysNonExtensible));
-    return Nothing<bool>();
-  }
-  // 21. Return trapResult.
-  return accumulator->AddKeysFromProxy(proxy, trap_result);
-}
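The bookkeeping in steps 16-21 above amounts to set subtraction over the trap result. A hypothetical standalone model using std::unordered_set in place of V8's IdentityMap (names and types here are illustrative, not V8 API):

#include <stdexcept>
#include <string>
#include <unordered_set>
#include <vector>

// Every non-configurable target key must appear in the trap result, and for
// a non-extensible target the trap result must match the target's keys
// exactly.
void CheckOwnKeysInvariants(const std::vector<std::string>& trap_result,
                            const std::vector<std::string>& nonconfigurable,
                            const std::vector<std::string>& configurable,
                            bool extensible_target) {
  // Step 16: uncheckedResultKeys starts as a copy of trapResult.
  std::unordered_set<std::string> unchecked(trap_result.begin(),
                                            trap_result.end());
  // Step 17: each non-configurable key must be present; remove it.
  for (const std::string& key : nonconfigurable) {
    if (unchecked.erase(key) == 0)
      throw std::runtime_error("TypeError: ownKeys is missing " + key);
  }
  // Step 18: an extensible target may report extra keys.
  if (extensible_target) return;
  // Step 19: each configurable key must be present as well; remove it.
  for (const std::string& key : configurable) {
    if (unchecked.erase(key) == 0)
      throw std::runtime_error("TypeError: ownKeys is missing " + key);
  }
  // Step 20: nothing may be left over on a non-extensible target.
  if (!unchecked.empty())
    throw std::runtime_error("TypeError: extra keys on non-extensible target");
}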
-
 MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
                                             KeyCollectionType type,
                                             PropertyFilter filter,
                                             GetKeysConversion keys_conversion,
                                             bool filter_proxy_keys) {
-  USE(ContainsOnlyValidKeys);
-  Isolate* isolate = object->GetIsolate();
-  KeyAccumulator accumulator(isolate, type, filter);
-  accumulator.set_filter_proxy_keys(filter_proxy_keys);
-  MAYBE_RETURN(
-      GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
-      MaybeHandle<FixedArray>());
-  Handle<FixedArray> keys = accumulator.GetKeys(keys_conversion);
-  DCHECK(ContainsOnlyValidKeys(keys));
-  return keys;
+  return KeyAccumulator::GetKeys(object, type, filter, keys_conversion,
+                                 filter_proxy_keys);
 }
 
 MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
@@ -8881,11 +8315,9 @@
   PropertyFilter key_filter =
       static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
   KeyAccumulator accumulator(isolate, OWN_ONLY, key_filter);
-  MAYBE_RETURN(GetKeys_Internal(isolate, object, object, OWN_ONLY, key_filter,
-                                &accumulator),
+  MAYBE_RETURN(accumulator.CollectKeys(object, object),
                MaybeHandle<FixedArray>());
   Handle<FixedArray> keys = accumulator.GetKeys(CONVERT_TO_STRING);
-  DCHECK(ContainsOnlyValidKeys(keys));
 
   values_or_entries = isolate->factory()->NewFixedArray(keys->length());
   int length = 0;
@@ -8970,7 +8402,7 @@
   Isolate* isolate = object->GetIsolate();
 
   LookupIterator it = LookupIterator::PropertyOrElement(
-      isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+      isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
   return DefineAccessor(&it, getter, setter, attributes);
 }
 
@@ -8998,40 +8430,11 @@
     return it->factory()->undefined_value();
   }
 
-  Handle<Object> old_value = isolate->factory()->the_hole_value();
-  bool is_observed = object->map()->is_observed() &&
-                     (it->IsElement() || !it->name()->IsPrivate());
-  bool preexists = false;
-  if (is_observed) {
-    CHECK(GetPropertyAttributes(it).IsJust());
-    preexists = it->IsFound();
-    if (preexists && (it->state() == LookupIterator::DATA ||
-                      it->GetAccessors()->IsAccessorInfo())) {
-      old_value = Object::GetProperty(it).ToHandleChecked();
-    }
-  }
-
   DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull() ||
          getter->IsFunctionTemplateInfo());
   DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull() ||
         setter->IsFunctionTemplateInfo());
-  // At least one of the accessors needs to be a new value.
-  DCHECK(!getter->IsNull() || !setter->IsNull());
-  if (!getter->IsNull()) {
-    it->TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
-  }
-  if (!setter->IsNull()) {
-    it->TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
-  }
-
-  if (is_observed) {
-    // Make sure the top context isn't changed.
-    AssertNoContextChange ncc(isolate);
-    const char* type = preexists ? "reconfigure" : "add";
-    RETURN_ON_EXCEPTION(
-        isolate, EnqueueChangeRecord(object, type, it->GetName(), old_value),
-        Object);
-  }
+  it->TransitionToAccessorProperty(getter, setter, attributes);
 
   return isolate->factory()->undefined_value();
 }
@@ -9043,7 +8446,7 @@
   Handle<Name> name(Name::cast(info->name()), isolate);
 
   LookupIterator it = LookupIterator::PropertyOrElement(
-      isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+      isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
 
   // Duplicate ACCESS_CHECK outside of GetPropertyAttributes for the case that
   // the FailedAccessCheckCallbackFunction doesn't throw an exception.
@@ -9077,53 +8480,6 @@
   return object;
 }
 
-
-MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
-                                          Handle<Name> name,
-                                          AccessorComponent component) {
-  Isolate* isolate = object->GetIsolate();
-
-  // Make sure that the top context does not change when doing callbacks or
-  // interceptor calls.
-  AssertNoContextChange ncc(isolate);
-
-  LookupIterator it = LookupIterator::PropertyOrElement(
-      isolate, object, name, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
-
-  for (; it.IsFound(); it.Next()) {
-    switch (it.state()) {
-      case LookupIterator::INTERCEPTOR:
-      case LookupIterator::NOT_FOUND:
-      case LookupIterator::TRANSITION:
-        UNREACHABLE();
-
-      case LookupIterator::ACCESS_CHECK:
-        if (it.HasAccess()) continue;
-        isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
-        RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-        return isolate->factory()->undefined_value();
-
-      case LookupIterator::JSPROXY:
-        return isolate->factory()->undefined_value();
-
-      case LookupIterator::INTEGER_INDEXED_EXOTIC:
-        return isolate->factory()->undefined_value();
-      case LookupIterator::DATA:
-        continue;
-      case LookupIterator::ACCESSOR: {
-        Handle<Object> maybe_pair = it.GetAccessors();
-        if (maybe_pair->IsAccessorPair()) {
-          return AccessorPair::GetComponent(
-              Handle<AccessorPair>::cast(maybe_pair), component);
-        }
-      }
-    }
-  }
-
-  return isolate->factory()->undefined_value();
-}
-
-
 Object* JSObject::SlowReverseLookup(Object* value) {
   if (HasFastProperties()) {
     int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
@@ -9562,6 +8918,10 @@
                                     TransitionFlag flag) {
   Map* maybe_elements_transition_map = NULL;
   if (flag == INSERT_TRANSITION) {
+    // Ensure the requested elements kind transition is added "near the root".
+    DCHECK_EQ(map->FindRootMap()->NumberOfOwnDescriptors(),
+              map->NumberOfOwnDescriptors());
+
     maybe_elements_transition_map = map->ElementsTransitionMap();
     DCHECK(maybe_elements_transition_map == NULL ||
            (maybe_elements_transition_map->elements_kind() ==
@@ -9641,30 +9001,6 @@
 }
 
 
-Handle<Map> Map::CopyForObserved(Handle<Map> map) {
-  DCHECK(!map->is_observed());
-
-  Isolate* isolate = map->GetIsolate();
-
-  bool insert_transition =
-      TransitionArray::CanHaveMoreTransitions(map) && !map->is_prototype_map();
-
-  if (insert_transition) {
-    Handle<Map> new_map = CopyForTransition(map, "CopyForObserved");
-    new_map->set_is_observed();
-
-    Handle<Name> name = isolate->factory()->observed_symbol();
-    ConnectTransition(map, new_map, name, SPECIAL_TRANSITION);
-    return new_map;
-  }
-
-  // Create a new free-floating map only if we are not allowed to store it.
-  Handle<Map> new_map = Map::Copy(map, "CopyForObserved");
-  new_map->set_is_observed();
-  return new_map;
-}
-
-
 Handle<Map> Map::CopyForTransition(Handle<Map> map, const char* reason) {
   DCHECK(!map->is_prototype_map());
   Handle<Map> new_map = CopyDropDescriptors(map);
@@ -9825,6 +9161,11 @@
                                           Handle<Object> value,
                                           PropertyAttributes attributes,
                                           StoreFromKeyed store_mode) {
+  RuntimeCallTimerScope stats_scope(
+      *map, map->is_prototype_map()
+                ? &RuntimeCallStats::PrototypeMap_TransitionToDataProperty
+                : &RuntimeCallStats::Map_TransitionToDataProperty);
+
   DCHECK(name->IsUniqueName());
   DCHECK(!map->is_dictionary_map());
 
@@ -9885,7 +9226,7 @@
     // There is no benefit from reconstructing transition tree for maps without
     // back pointers.
     return CopyGeneralizeAllRepresentations(
-        map, descriptor, FORCE_FIELD, kind, attributes,
+        map, map->elements_kind(), descriptor, FORCE_FIELD, kind, attributes,
         "GenAll_AttributesMismatchProtoMap");
   }
 
@@ -9900,13 +9241,20 @@
   return new_map;
 }
 
-Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
+Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
                                               Handle<Name> name, int descriptor,
-                                              AccessorComponent component,
-                                              Handle<Object> accessor,
+                                              Handle<Object> getter,
+                                              Handle<Object> setter,
                                               PropertyAttributes attributes) {
+  RuntimeCallTimerScope stats_scope(
+      isolate,
+      map->is_prototype_map()
+          ? &RuntimeCallStats::PrototypeMap_TransitionToAccessorProperty
+          : &RuntimeCallStats::Map_TransitionToAccessorProperty);
+
+  // At least one of the accessors needs to be a new value.
+  DCHECK(!getter->IsNull() || !setter->IsNull());
   DCHECK(name->IsUniqueName());
-  Isolate* isolate = name->GetIsolate();
 
   // Dictionary maps can always have additional data properties.
   if (map->is_dictionary_map()) return map;
@@ -9935,7 +9283,7 @@
     }
 
     Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
-    if (pair->get(component) != *accessor) {
+    if (!pair->Equals(*getter, *setter)) {
       return Map::Normalize(map, mode, "TransitionToDifferentAccessor");
     }
 
@@ -9962,10 +9310,19 @@
       return Map::Normalize(map, mode, "AccessorsOverwritingNonPair");
     }
 
-    Object* current = Handle<AccessorPair>::cast(maybe_pair)->get(component);
-    if (current == *accessor) return map;
+    Handle<AccessorPair> current_pair = Handle<AccessorPair>::cast(maybe_pair);
+    if (current_pair->Equals(*getter, *setter)) return map;
 
-    if (!current->IsTheHole()) {
+    bool overwriting_accessor = false;
+    if (!getter->IsNull() && !current_pair->get(ACCESSOR_GETTER)->IsNull() &&
+        current_pair->get(ACCESSOR_GETTER) != *getter) {
+      overwriting_accessor = true;
+    }
+    if (!setter->IsNull() && !current_pair->get(ACCESSOR_SETTER)->IsNull() &&
+        current_pair->get(ACCESSOR_SETTER) != *setter) {
+      overwriting_accessor = true;
+    }
+    if (overwriting_accessor) {
       return Map::Normalize(map, mode, "AccessorsOverwritingAccessors");
     }
 
@@ -9977,7 +9334,8 @@
     pair = isolate->factory()->NewAccessorPair();
   }
 
-  pair->set(component, *accessor);
+  pair->SetComponents(*getter, *setter);
+
   TransitionFlag flag = INSERT_TRANSITION;
   AccessorConstantDescriptor new_desc(name, pair, attributes);
   return Map::CopyInsertDescriptor(map, &new_desc, flag);
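The overwriting_accessor logic above distinguishes filling in a missing component from replacing an installed one. A compact standalone restatement, with 0 standing in for null (illustrative only, not V8 API):

// Replacing an installed component with a different non-null one is an
// overwrite (and forces normalization); filling in a previously null
// component is not.
bool OverwritesAccessor(int current_getter, int current_setter,
                        int new_getter, int new_setter) {
  bool overwriting = false;
  if (new_getter != 0 && current_getter != 0 && current_getter != new_getter)
    overwriting = true;
  if (new_setter != 0 && current_setter != 0 && current_setter != new_setter)
    overwriting = true;
  return overwriting;
}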
@@ -10128,215 +9486,22 @@
   Isolate* isolate = map->GetIsolate();
   HandleScope scope(isolate);
   // Allocate the code cache if not present.
-  if (map->code_cache()->IsFixedArray()) {
-    Handle<Object> result = isolate->factory()->NewCodeCache();
+  if (!map->has_code_cache()) {
+    Handle<Object> result =
+        CodeCacheHashTable::New(isolate, CodeCacheHashTable::kInitialSize);
     map->set_code_cache(*result);
   }
 
   // Update the code cache.
-  Handle<CodeCache> code_cache(CodeCache::cast(map->code_cache()), isolate);
-  CodeCache::Update(code_cache, name, code);
-}
-
-
-Object* Map::FindInCodeCache(Name* name, Code::Flags flags) {
-  // Do a lookup if a code cache exists.
-  if (!code_cache()->IsFixedArray()) {
-    return CodeCache::cast(code_cache())->Lookup(name, flags);
-  } else {
-    return GetHeap()->undefined_value();
-  }
-}
-
-
-int Map::IndexInCodeCache(Object* name, Code* code) {
-  // Get the internal index if a code cache exists.
-  if (!code_cache()->IsFixedArray()) {
-    return CodeCache::cast(code_cache())->GetIndex(name, code);
-  }
-  return -1;
-}
-
-
-void Map::RemoveFromCodeCache(Name* name, Code* code, int index) {
-  // No GC is supposed to happen between a call to IndexInCodeCache and
-  // RemoveFromCodeCache so the code cache must be there.
-  DCHECK(!code_cache()->IsFixedArray());
-  CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
-}
-
-
-void CodeCache::Update(
-    Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
-  // The number of monomorphic stubs for normal load/store/call ICs can grow
-  // large, so they need to go into a hash table. They are used to load
-  // global properties from cells.
-  if (code->type() == Code::NORMAL) {
-    // Make sure that a hash table is allocated for the normal load code cache.
-    if (code_cache->normal_type_cache()->IsUndefined()) {
-      Handle<Object> result =
-          CodeCacheHashTable::New(code_cache->GetIsolate(),
-                                  CodeCacheHashTable::kInitialSize);
-      code_cache->set_normal_type_cache(*result);
-    }
-    UpdateNormalTypeCache(code_cache, name, code);
-  } else {
-    DCHECK(code_cache->default_cache()->IsFixedArray());
-    UpdateDefaultCache(code_cache, name, code);
-  }
-}
-
-
-void CodeCache::UpdateDefaultCache(
-    Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
-  Isolate* isolate = code_cache->GetIsolate();
-  // When updating the default code cache we disregard the type encoded in the
-  // flags. This allows call constant stubs to overwrite call field
-  // stubs, etc.
-  Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
-
-  // First check whether we can update existing code cache without
-  // extending it.
-  Handle<FixedArray> cache = handle(code_cache->default_cache());
-  int length = cache->length();
-  {
-    DisallowHeapAllocation no_alloc;
-    int deleted_index = -1;
-    Object* null = isolate->heap()->null_value();
-    Object* undefined = isolate->heap()->undefined_value();
-    DCHECK(name->IsUniqueName());
-    for (int i = 0; i < length; i += kCodeCacheEntrySize) {
-      Object* key = cache->get(i);
-      if (key == null) {
-        if (deleted_index < 0) deleted_index = i;
-        continue;
-      }
-      if (key == undefined) {
-        if (deleted_index >= 0) i = deleted_index;
-        cache->set(i + kCodeCacheEntryNameOffset, *name);
-        cache->set(i + kCodeCacheEntryCodeOffset, *code);
-        return;
-      }
-      DCHECK(key->IsUniqueName());
-      if (*name == key) {
-        Code::Flags found =
-            Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
-        if (Code::RemoveTypeFromFlags(found) == flags) {
-          cache->set(i + kCodeCacheEntryCodeOffset, *code);
-          return;
-        }
-      }
-    }
-
-    // Reached the end of the code cache.  If there were deleted
-    // elements, reuse the space for the first of them.
-    if (deleted_index >= 0) {
-      cache->set(deleted_index + kCodeCacheEntryNameOffset, *name);
-      cache->set(deleted_index + kCodeCacheEntryCodeOffset, *code);
-      return;
-    }
-  }
-
-  // Extend the code cache with some new entries (at least one). Must be a
-  // multiple of the entry size.
-  int new_length = length + (length >> 1) + kCodeCacheEntrySize;
-  new_length = new_length - new_length % kCodeCacheEntrySize;
-  DCHECK((new_length % kCodeCacheEntrySize) == 0);
-  cache = isolate->factory()->CopyFixedArrayAndGrow(cache, new_length - length);
-
-  // Add the (name, code) pair to the new cache.
-  cache->set(length + kCodeCacheEntryNameOffset, *name);
-  cache->set(length + kCodeCacheEntryCodeOffset, *code);
-  code_cache->set_default_cache(*cache);
-}
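The growth formula in the deleted UpdateDefaultCache grows the backing array by roughly 1.5x plus one entry and then rounds down to a multiple of the two-slot entry size. A small worked model (standalone, not V8 API):

#include <cassert>

// Grow by ~1.5x plus one entry, rounded down to a multiple of the
// two-slot (name, code) entry size.
int GrownCacheLength(int length) {
  const int kCodeCacheEntrySize = 2;  // one name slot + one code slot
  int new_length = length + (length >> 1) + kCodeCacheEntrySize;
  return new_length - new_length % kCodeCacheEntrySize;
}

int main() {
  assert(GrownCacheLength(10) == 16);  // 10 + 5 + 2 = 17, rounded down to 16
  assert(GrownCacheLength(0) == 2);    // an empty cache gets one fresh entry
  return 0;
}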
-
-
-void CodeCache::UpdateNormalTypeCache(
-    Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
-  // Adding a new entry can cause a new cache to be allocated.
-  Handle<CodeCacheHashTable> cache(
-      CodeCacheHashTable::cast(code_cache->normal_type_cache()));
+  Handle<CodeCacheHashTable> cache(CodeCacheHashTable::cast(map->code_cache()),
+                                   isolate);
   Handle<Object> new_cache = CodeCacheHashTable::Put(cache, name, code);
-  code_cache->set_normal_type_cache(*new_cache);
+  map->set_code_cache(*new_cache);
 }
 
-
-Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
-  Object* result = LookupDefaultCache(name, Code::RemoveTypeFromFlags(flags));
-  if (result->IsCode()) {
-    if (Code::cast(result)->flags() == flags) return result;
-    return GetHeap()->undefined_value();
-  }
-  return LookupNormalTypeCache(name, flags);
-}
-
-
-Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) {
-  FixedArray* cache = default_cache();
-  Heap* heap = GetHeap();
-  Object* null = heap->null_value();
-  Object* undefined = heap->undefined_value();
-  int length = cache->length();
-  DCHECK(name->IsUniqueName());
-  for (int i = 0; i < length; i += kCodeCacheEntrySize) {
-    Object* key = cache->get(i + kCodeCacheEntryNameOffset);
-    // Skip deleted elements.
-    if (key == null) continue;
-    if (key == undefined) return key;
-    DCHECK(key->IsUniqueName());
-    if (name == key) {
-      Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
-      if (Code::RemoveTypeFromFlags(code->flags()) == flags) {
-        return code;
-      }
-    }
-  }
-  return GetHeap()->undefined_value();
-}
-
-
-Object* CodeCache::LookupNormalTypeCache(Name* name, Code::Flags flags) {
-  if (!normal_type_cache()->IsUndefined()) {
-    CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
-    return cache->Lookup(name, flags);
-  } else {
-    return GetHeap()->undefined_value();
-  }
-}
-
-
-int CodeCache::GetIndex(Object* name, Code* code) {
-  if (code->type() == Code::NORMAL) {
-    if (normal_type_cache()->IsUndefined()) return -1;
-    CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
-    return cache->GetIndex(Name::cast(name), code->flags());
-  }
-
-  FixedArray* array = default_cache();
-  int len = array->length();
-  for (int i = 0; i < len; i += kCodeCacheEntrySize) {
-    if (array->get(i + kCodeCacheEntryCodeOffset) == code) return i + 1;
-  }
-  return -1;
-}
-
-
-void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
-  if (code->type() == Code::NORMAL) {
-    DCHECK(!normal_type_cache()->IsUndefined());
-    CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
-    DCHECK(cache->GetIndex(Name::cast(name), code->flags()) == index);
-    cache->RemoveByIndex(index);
-  } else {
-    FixedArray* array = default_cache();
-    DCHECK(array->length() >= index && array->get(index)->IsCode());
-    // Use null instead of undefined for deleted elements to distinguish
-    // deleted elements from unused elements.  This distinction is used
-    // when looking up in the cache and when updating the cache.
-    DCHECK_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
-    array->set_null(index - 1);  // Name.
-    array->set_null(index);  // Code.
-  }
+Code* Map::LookupInCodeCache(Name* name, Code::Flags flags) {
+  if (!has_code_cache()) return nullptr;
+  return CodeCacheHashTable::cast(code_cache())->Lookup(name, flags);
 }
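The rewritten Map::UpdateCodeCache and Map::LookupInCodeCache follow a simple lazy-allocation scheme: the per-map table is materialized on the first insertion, and a missing table simply means a lookup miss. A hypothetical model of the same pattern with standard containers (illustrative only, not V8 API):

#include <memory>
#include <string>
#include <unordered_map>

struct MapModel {
  std::unique_ptr<std::unordered_map<std::string, int>> code_cache;

  void UpdateCodeCache(const std::string& name, int code) {
    if (!code_cache) {  // Allocate the code cache if not present.
      code_cache = std::make_unique<std::unordered_map<std::string, int>>();
    }
    (*code_cache)[name] = code;
  }

  const int* LookupInCodeCache(const std::string& name) const {
    if (!code_cache) return nullptr;  // No cache yet: always a miss.
    auto it = code_cache->find(name);
    return it == code_cache->end() ? nullptr : &it->second;
  }
};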
 
 
@@ -10347,20 +9512,23 @@
 class CodeCacheHashTableKey : public HashTableKey {
  public:
   CodeCacheHashTableKey(Handle<Name> name, Code::Flags flags)
-      : name_(name), flags_(flags), code_() { }
+      : name_(name), flags_(flags), code_() {
+    DCHECK(name_->IsUniqueName());
+  }
 
   CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
-      : name_(name), flags_(code->flags()), code_(code) { }
+      : name_(name), flags_(code->flags()), code_(code) {
+    DCHECK(name_->IsUniqueName());
+  }
 
   bool IsMatch(Object* other) override {
-    if (!other->IsFixedArray()) return false;
+    DCHECK(other->IsFixedArray());
     FixedArray* pair = FixedArray::cast(other);
     Name* name = Name::cast(pair->get(0));
     Code::Flags flags = Code::cast(pair->get(1))->flags();
-    if (flags != flags_) {
-      return false;
-    }
-    return name_->Equals(name);
+    if (flags != flags_) return false;
+    DCHECK(name->IsUniqueName());
+    return *name_ == name;
   }
 
   static uint32_t NameFlagsHashHelper(Name* name, Code::Flags flags) {
@@ -10392,15 +9560,6 @@
 };
 
 
-Object* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) {
-  DisallowHeapAllocation no_alloc;
-  CodeCacheHashTableKey key(handle(name), flags);
-  int entry = FindEntry(&key);
-  if (entry == kNotFound) return GetHeap()->undefined_value();
-  return get(EntryToIndex(entry) + 1);
-}
-
-
 Handle<CodeCacheHashTable> CodeCacheHashTable::Put(
     Handle<CodeCacheHashTable> cache, Handle<Name> name, Handle<Code> code) {
   CodeCacheHashTableKey key(name, code);
@@ -10411,179 +9570,18 @@
   Handle<Object> k = key.AsHandle(cache->GetIsolate());
 
   new_cache->set(EntryToIndex(entry), *k);
-  new_cache->set(EntryToIndex(entry) + 1, *code);
   new_cache->ElementAdded();
   return new_cache;
 }
 
-
-int CodeCacheHashTable::GetIndex(Name* name, Code::Flags flags) {
+Code* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) {
   DisallowHeapAllocation no_alloc;
   CodeCacheHashTableKey key(handle(name), flags);
   int entry = FindEntry(&key);
-  return (entry == kNotFound) ? -1 : entry;
+  if (entry == kNotFound) return nullptr;
+  return Code::cast(FixedArray::cast(get(EntryToIndex(entry)))->get(1));
 }
 
-
-void CodeCacheHashTable::RemoveByIndex(int index) {
-  DCHECK(index >= 0);
-  Heap* heap = GetHeap();
-  set(EntryToIndex(index), heap->the_hole_value());
-  set(EntryToIndex(index) + 1, heap->the_hole_value());
-  ElementRemoved();
-}
-
-
-void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> code_cache,
-                                  MapHandleList* maps,
-                                  Code::Flags flags,
-                                  Handle<Code> code) {
-  Isolate* isolate = code_cache->GetIsolate();
-  if (code_cache->cache()->IsUndefined()) {
-    Handle<PolymorphicCodeCacheHashTable> result =
-        PolymorphicCodeCacheHashTable::New(
-            isolate,
-            PolymorphicCodeCacheHashTable::kInitialSize);
-    code_cache->set_cache(*result);
-  } else {
-    // This entry shouldn't be contained in the cache yet.
-    DCHECK(PolymorphicCodeCacheHashTable::cast(code_cache->cache())
-               ->Lookup(maps, flags)->IsUndefined());
-  }
-  Handle<PolymorphicCodeCacheHashTable> hash_table =
-      handle(PolymorphicCodeCacheHashTable::cast(code_cache->cache()));
-  Handle<PolymorphicCodeCacheHashTable> new_cache =
-      PolymorphicCodeCacheHashTable::Put(hash_table, maps, flags, code);
-  code_cache->set_cache(*new_cache);
-}
-
-
-Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
-                                            Code::Flags flags) {
-  if (!cache()->IsUndefined()) {
-    PolymorphicCodeCacheHashTable* hash_table =
-        PolymorphicCodeCacheHashTable::cast(cache());
-    return Handle<Object>(hash_table->Lookup(maps, flags), GetIsolate());
-  } else {
-    return GetIsolate()->factory()->undefined_value();
-  }
-}
-
-
-// Despite their name, objects of this class are not stored in the actual
-// hash table; they are only used temporarily for lookups. It is therefore
-// safe to have a weak (non-owning) pointer to a MapList as a member field.
-class PolymorphicCodeCacheHashTableKey : public HashTableKey {
- public:
-  // Callers must ensure that |maps| outlives the newly constructed object.
-  PolymorphicCodeCacheHashTableKey(MapHandleList* maps, int code_flags)
-      : maps_(maps),
-        code_flags_(code_flags) {}
-
-  bool IsMatch(Object* other) override {
-    MapHandleList other_maps(kDefaultListAllocationSize);
-    int other_flags;
-    FromObject(other, &other_flags, &other_maps);
-    if (code_flags_ != other_flags) return false;
-    if (maps_->length() != other_maps.length()) return false;
-    // Compare just the hashes first because it's faster.
-    int this_hash = MapsHashHelper(maps_, code_flags_);
-    int other_hash = MapsHashHelper(&other_maps, other_flags);
-    if (this_hash != other_hash) return false;
-
-    // Full comparison: for each map in maps_, look for an equivalent map in
-    // other_maps. This implementation is slow, but probably good enough for
-    // now because the lists are short (<= 4 elements currently).
-    for (int i = 0; i < maps_->length(); ++i) {
-      bool match_found = false;
-      for (int j = 0; j < other_maps.length(); ++j) {
-        if (*(maps_->at(i)) == *(other_maps.at(j))) {
-          match_found = true;
-          break;
-        }
-      }
-      if (!match_found) return false;
-    }
-    return true;
-  }
-
-  static uint32_t MapsHashHelper(MapHandleList* maps, int code_flags) {
-    uint32_t hash = code_flags;
-    for (int i = 0; i < maps->length(); ++i) {
-      hash ^= maps->at(i)->Hash();
-    }
-    return hash;
-  }
-
-  uint32_t Hash() override { return MapsHashHelper(maps_, code_flags_); }
-
-  uint32_t HashForObject(Object* obj) override {
-    MapHandleList other_maps(kDefaultListAllocationSize);
-    int other_flags;
-    FromObject(obj, &other_flags, &other_maps);
-    return MapsHashHelper(&other_maps, other_flags);
-  }
-
-  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
-    // The maps in |maps_| must be copied to a newly allocated FixedArray,
-    // both because the referenced MapList is short-lived, and because C++
-    // objects can't be stored in the heap anyway.
-    Handle<FixedArray> list =
-        isolate->factory()->NewUninitializedFixedArray(maps_->length() + 1);
-    list->set(0, Smi::FromInt(code_flags_));
-    for (int i = 0; i < maps_->length(); ++i) {
-      list->set(i + 1, *maps_->at(i));
-    }
-    return list;
-  }
-
- private:
-  static MapHandleList* FromObject(Object* obj,
-                                   int* code_flags,
-                                   MapHandleList* maps) {
-    FixedArray* list = FixedArray::cast(obj);
-    maps->Rewind(0);
-    *code_flags = Smi::cast(list->get(0))->value();
-    for (int i = 1; i < list->length(); ++i) {
-      maps->Add(Handle<Map>(Map::cast(list->get(i))));
-    }
-    return maps;
-  }
-
-  MapHandleList* maps_;  // weak.
-  int code_flags_;
-  static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
-};
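MapsHashHelper above XOR-combines the per-map hashes, which makes the hash order-independent and therefore consistent with the order-insensitive pairwise comparison in IsMatch. A standalone restatement (illustrative only, not V8 API):

#include <cstdint>
#include <vector>

// XOR-combining is commutative, so the same set of maps hashes identically
// regardless of list order.
uint32_t MapsHash(const std::vector<uint32_t>& map_hashes, int code_flags) {
  uint32_t hash = static_cast<uint32_t>(code_flags);
  for (uint32_t h : map_hashes) hash ^= h;
  return hash;
}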
-
-
-Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
-                                              int code_kind) {
-  DisallowHeapAllocation no_alloc;
-  PolymorphicCodeCacheHashTableKey key(maps, code_kind);
-  int entry = FindEntry(&key);
-  if (entry == kNotFound) return GetHeap()->undefined_value();
-  return get(EntryToIndex(entry) + 1);
-}
-
-
-Handle<PolymorphicCodeCacheHashTable> PolymorphicCodeCacheHashTable::Put(
-      Handle<PolymorphicCodeCacheHashTable> hash_table,
-      MapHandleList* maps,
-      int code_kind,
-      Handle<Code> code) {
-  PolymorphicCodeCacheHashTableKey key(maps, code_kind);
-  Handle<PolymorphicCodeCacheHashTable> cache =
-      EnsureCapacity(hash_table, 1, &key);
-  int entry = cache->FindInsertionEntry(key.Hash());
-
-  Handle<Object> obj = key.AsHandle(hash_table->GetIsolate());
-  cache->set(EntryToIndex(entry), *obj);
-  cache->set(EntryToIndex(entry) + 1, *code);
-  cache->ElementAdded();
-  return cache;
-}
-
-
 void FixedArray::Shrink(int new_length) {
   DCHECK(0 <= new_length && new_length <= length());
   if (new_length < length()) {
@@ -10939,7 +9937,7 @@
         .ToHandleChecked();
   }
   Isolate* isolate = accessor_pair->GetIsolate();
-  if (accessor->IsTheHole()) {
+  if (accessor->IsNull()) {
     return isolate->factory()->undefined_value();
   }
   return handle(accessor, isolate);
@@ -11268,25 +10266,6 @@
 }
 
 
-base::SmartArrayPointer<uc16> String::ToWideCString(
-    RobustnessFlag robust_flag) {
-  if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
-    return base::SmartArrayPointer<uc16>();
-  }
-  StringCharacterStream stream(this);
-
-  uc16* result = NewArray<uc16>(length() + 1);
-
-  int i = 0;
-  while (stream.HasMore()) {
-    uint16_t character = stream.GetNext();
-    result[i++] = character;
-  }
-  result[i] = 0;
-  return base::SmartArrayPointer<uc16>(result);
-}
-
-
 const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
   return reinterpret_cast<uc16*>(
       reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
@@ -12332,15 +11311,18 @@
   return false;
 }
 
+void JSFunction::MarkForBaseline() {
+  Isolate* isolate = GetIsolate();
+  set_code_no_write_barrier(
+      isolate->builtins()->builtin(Builtins::kCompileBaseline));
+  // No write barrier required, since the builtin is part of the root set.
+}
 
 void JSFunction::MarkForOptimization() {
   Isolate* isolate = GetIsolate();
-  // Do not optimize if function contains break points.
-  if (shared()->HasDebugInfo()) return;
   DCHECK(!IsOptimized());
   DCHECK(shared()->allows_lazy_compilation() ||
          !shared()->optimization_disabled());
-  DCHECK(!shared()->HasDebugInfo());
   set_code_no_write_barrier(
       isolate->builtins()->builtin(Builtins::kCompileOptimized));
   // No write barrier required, since the builtin is part of the root set.
@@ -12839,6 +11821,8 @@
 // static
 void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
                        PrototypeOptimizationMode proto_mode) {
+  RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
+
   bool is_hidden = false;
   if (prototype->IsJSObject()) {
     Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
@@ -12964,8 +11948,13 @@
     new_map->SetConstructor(*value);
     new_map->set_non_instance_prototype(true);
     Isolate* isolate = new_map->GetIsolate();
+
     construct_prototype = handle(
-        function->context()->native_context()->initial_object_prototype(),
+        IsGeneratorFunction(function->shared()->kind())
+            ? function->context()
+                  ->native_context()
+                  ->initial_generator_prototype()
+            : function->context()->native_context()->initial_object_prototype(),
         isolate);
   } else {
     function->map()->set_non_instance_prototype(false);
@@ -13020,50 +12009,52 @@
 
 bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
   switch (instance_type) {
-    case JS_OBJECT_TYPE:
-    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
-    case JS_GENERATOR_OBJECT_TYPE:
-    case JS_MODULE_TYPE:
-    case JS_VALUE_TYPE:
-    case JS_DATE_TYPE:
-    case JS_ARRAY_TYPE:
-    case JS_MESSAGE_OBJECT_TYPE:
+    case JS_API_OBJECT_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
-    case JS_TYPED_ARRAY_TYPE:
+    case JS_ARRAY_TYPE:
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_DATA_VIEW_TYPE:
-    case JS_SET_TYPE:
-    case JS_MAP_TYPE:
-    case JS_SET_ITERATOR_TYPE:
+    case JS_DATE_TYPE:
+    case JS_FUNCTION_TYPE:
+    case JS_GENERATOR_OBJECT_TYPE:
     case JS_MAP_ITERATOR_TYPE:
-    case JS_WEAK_MAP_TYPE:
-    case JS_WEAK_SET_TYPE:
+    case JS_MAP_TYPE:
+    case JS_MESSAGE_OBJECT_TYPE:
+    case JS_MODULE_TYPE:
+    case JS_OBJECT_TYPE:
     case JS_PROMISE_TYPE:
     case JS_REGEXP_TYPE:
-    case JS_FUNCTION_TYPE:
+    case JS_SET_ITERATOR_TYPE:
+    case JS_SET_TYPE:
+    case JS_SPECIAL_API_OBJECT_TYPE:
+    case JS_TYPED_ARRAY_TYPE:
+    case JS_VALUE_TYPE:
+    case JS_WEAK_MAP_TYPE:
+    case JS_WEAK_SET_TYPE:
       return true;
 
-    case JS_BOUND_FUNCTION_TYPE:
-    case JS_PROXY_TYPE:
-    case JS_GLOBAL_PROXY_TYPE:
-    case JS_GLOBAL_OBJECT_TYPE:
+    case BYTECODE_ARRAY_TYPE:
+    case BYTE_ARRAY_TYPE:
+    case CELL_TYPE:
+    case CODE_TYPE:
+    case FILLER_TYPE:
     case FIXED_ARRAY_TYPE:
     case FIXED_DOUBLE_ARRAY_TYPE:
-    case ODDBALL_TYPE:
     case FOREIGN_TYPE:
-    case MAP_TYPE:
-    case CODE_TYPE:
-    case CELL_TYPE:
-    case PROPERTY_CELL_TYPE:
-    case WEAK_CELL_TYPE:
-    case SYMBOL_TYPE:
-    case BYTECODE_ARRAY_TYPE:
-    case HEAP_NUMBER_TYPE:
-    case MUTABLE_HEAP_NUMBER_TYPE:
-    case SIMD128_VALUE_TYPE:
-    case FILLER_TYPE:
-    case BYTE_ARRAY_TYPE:
     case FREE_SPACE_TYPE:
+    case HEAP_NUMBER_TYPE:
+    case JS_BOUND_FUNCTION_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_PROXY_TYPE:
+    case MAP_TYPE:
+    case MUTABLE_HEAP_NUMBER_TYPE:
+    case ODDBALL_TYPE:
+    case PROPERTY_CELL_TYPE:
     case SHARED_FUNCTION_INFO_TYPE:
+    case SIMD128_VALUE_TYPE:
+    case SYMBOL_TYPE:
+    case WEAK_CELL_TYPE:
 
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
   case FIXED_##TYPE##_ARRAY_TYPE:
@@ -13085,7 +12076,7 @@
 
 
 void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
-  DCHECK(function->IsConstructor() || function->shared()->is_generator());
+  DCHECK(function->IsConstructor() || function->shared()->is_resumable());
   if (function->has_initial_map()) return;
   Isolate* isolate = function->GetIsolate();
 
@@ -13096,7 +12087,7 @@
   // First create a new map with the size and number of in-object properties
   // suggested by the function.
   InstanceType instance_type;
-  if (function->shared()->is_generator()) {
+  if (function->shared()->is_resumable()) {
     instance_type = JS_GENERATOR_OBJECT_TYPE;
   } else {
     instance_type = JS_OBJECT_TYPE;
@@ -13296,18 +12287,6 @@
   return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
 }
 
-// static
-MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
-                                             Handle<JSBoundFunction> function) {
-  Handle<String> prefix = isolate->factory()->bound__string();
-  if (!function->bound_target_function()->IsJSFunction()) return prefix;
-  Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
-                            isolate);
-  Handle<Object> target_name = JSFunction::GetName(target);
-  if (!target_name->IsString()) return prefix;
-  Factory* factory = isolate->factory();
-  return factory->NewConsString(prefix, Handle<String>::cast(target_name));
-}
 
 // static
 Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
@@ -13341,10 +12320,16 @@
   IncrementalStringBuilder builder(isolate);
   if (!shared_info->is_arrow()) {
     if (shared_info->is_concise_method()) {
-      if (shared_info->is_generator()) builder.AppendCharacter('*');
+      if (shared_info->is_generator()) {
+        builder.AppendCharacter('*');
+      } else if (shared_info->is_async()) {
+        builder.AppendCString("async ");
+      }
     } else {
       if (shared_info->is_generator()) {
         builder.AppendCString("function* ");
+      } else if (shared_info->is_async()) {
+        builder.AppendCString("async function ");
       } else {
         builder.AppendCString("function ");
       }
@@ -13366,6 +12351,7 @@
       isolate->factory()->InternalizeUtf8String(to_string);
   Handle<String> internalized_type_of =
       isolate->factory()->InternalizeUtf8String(type_of);
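+  // Cache the numeric value as a raw double alongside the to_number object.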
+  oddball->set_to_number_raw(to_number->Number());
   oddball->set_to_boolean(isolate->heap()->ToBoolean(to_boolean));
   oddball->set_to_number(*to_number);
   oddball->set_to_string(*internalized_to_string);
@@ -13373,6 +12359,45 @@
   oddball->set_kind(kind);
 }
 
+void Script::SetEvalOrigin(Handle<Script> script,
+                           Handle<SharedFunctionInfo> outer_info,
+                           int eval_position) {
+  if (eval_position == RelocInfo::kNoPosition) {
+    // If the position is missing, attempt to get the code offset from the
+    // current activation.  Do not translate the code offset into a source
+    // position; instead store it as a negative value for lazy translation.
+    StackTraceFrameIterator it(script->GetIsolate());
+    if (!it.done() && it.is_javascript()) {
+      FrameSummary summary = FrameSummary::GetFirst(it.javascript_frame());
+      script->set_eval_from_shared(summary.function()->shared());
+      script->set_eval_from_position(-summary.code_offset());
+      return;
+    }
+    eval_position = 0;
+  }
+  script->set_eval_from_shared(*outer_info);
+  script->set_eval_from_position(eval_position);
+}
+
+int Script::GetEvalPosition() {
+  DisallowHeapAllocation no_gc;
+  DCHECK(compilation_type() == Script::COMPILATION_TYPE_EVAL);
+  int position = eval_from_position();
+  if (position < 0) {
+    // Due to laziness, the position may not have been translated from the
+    // code offset yet; in that case it is encoded as a negative integer.
+    // Translate and store the position now.
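+    // E.g. a stored value of -42 means "code offset 42, not yet translated
+    // to a source position".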
+    if (eval_from_shared()->IsUndefined()) {
+      position = 0;
+    } else {
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(eval_from_shared());
+      position = shared->abstract_code()->SourcePosition(-position);
+    }
+    DCHECK(position >= 0);
+    set_eval_from_position(position);
+  }
+  return position;
+}
 
 void Script::InitLineEnds(Handle<Script> script) {
   if (!script->line_ends()->IsUndefined()) return;
@@ -13515,7 +12540,8 @@
   SharedFunctionInfo* shared;
   while ((shared = iterator.Next<SharedFunctionInfo>())) {
     if (fun->function_token_position() == shared->function_token_position() &&
-        fun->start_position() == shared->start_position()) {
+        fun->start_position() == shared->start_position() &&
+        fun->end_position() == shared->end_position()) {
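+      // Also matching the end position distinguishes functions that begin at
+      // the same source position.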
       return Handle<SharedFunctionInfo>(shared);
     }
   }
@@ -13813,6 +12839,32 @@
   }
 }
 
+namespace {
+
+// Sets the expected number of properties based on estimate from parser.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+                                          FunctionLiteral* literal) {
+  int estimate = literal->expected_property_count();
+
+  // If no properties are added in the constructor, they are more likely
+  // to be added later.
+  if (estimate == 0) estimate = 2;
+
+  // TODO(yangguo): check whether those heuristics are still up-to-date.
+  // We do not shrink objects that go into a snapshot (yet), so we adjust
+  // the estimate conservatively.
+  if (shared->GetIsolate()->serializer_enabled()) {
+    estimate += 2;
+  } else {
+    // Inobject slack tracking will reclaim redundant inobject space later,
+    // so we can afford to adjust the estimate generously.
+    estimate += 8;
+  }
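+  // E.g. a parser estimate of 0 becomes 2, then 4 when building a snapshot
+  // or 10 otherwise.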
+
+  shared->set_expected_nof_properties(estimate);
+}
+
+}  // namespace
 
 void SharedFunctionInfo::InitFromFunctionLiteral(
     Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
@@ -13838,6 +12890,7 @@
   }
   shared_info->set_dont_crankshaft(lit->flags() &
                                    AstProperties::kDontCrankshaft);
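+  // A SharedFunctionInfo freshly initialized from a literal has not been
+  // compiled yet.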
+  shared_info->set_never_compiled(true);
   shared_info->set_kind(lit->kind());
   if (!IsConstructable(lit->kind(), lit->language_mode())) {
     shared_info->set_construct_stub(
@@ -13845,6 +12898,7 @@
   }
   shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
   shared_info->set_asm_function(lit->scope()->asm_function());
+  SetExpectedNofPropertiesFromEstimate(shared_info, lit);
 }
 
 
@@ -13880,8 +12934,15 @@
   set_ic_age(new_ic_age);
   if (code()->kind() == Code::FUNCTION) {
     code()->set_profiler_ticks(0);
-    if (optimization_disabled() &&
-        opt_count() >= FLAG_max_opt_count) {
+    if (optimization_disabled() && opt_count() >= FLAG_max_opt_count) {
+      // Re-enable optimizations if they were disabled due to opt_count limit.
+      set_optimization_disabled(false);
+    }
+    set_opt_count(0);
+    set_deopt_count(0);
+  } else if (code()->is_interpreter_entry_trampoline()) {
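+    // For interpreted functions, profiler ticks live on the
+    // SharedFunctionInfo itself rather than on the code object.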
+    set_profiler_ticks(0);
+    if (optimization_disabled() && opt_count() >= FLAG_max_opt_count) {
       // Re-enable optimizations if they were disabled due to opt_count limit.
       set_optimization_disabled(false);
     }
@@ -13941,12 +13002,6 @@
                     : LiteralsArray::cast(literals_cell->value())};
     }
   }
-  if (FLAG_trace_opt && !OptimizedCodeMapIsCleared() &&
-      result.code == nullptr) {
-    PrintF("[didn't find optimized code in optimized code map for ");
-    ShortPrint();
-    PrintF("]\n");
-  }
   return result;
 }
 
@@ -14230,126 +13285,14 @@
 }
 
 
-void Code::FindAllMaps(MapHandleList* maps) {
-  DCHECK(is_inline_cache_stub());
-  DisallowHeapAllocation no_allocation;
-  int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-  for (RelocIterator it(this, mask); !it.done(); it.next()) {
-    RelocInfo* info = it.rinfo();
-    Object* object = info->target_object();
-    if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
-    if (object->IsMap()) maps->Add(handle(Map::cast(object)));
-  }
-}
-
-
-Code* Code::FindFirstHandler() {
-  DCHECK(is_inline_cache_stub());
-  DisallowHeapAllocation no_allocation;
-  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
-             RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-  bool skip_next_handler = false;
-  for (RelocIterator it(this, mask); !it.done(); it.next()) {
-    RelocInfo* info = it.rinfo();
-    if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
-      Object* obj = info->target_object();
-      skip_next_handler |= obj->IsWeakCell() && WeakCell::cast(obj)->cleared();
-    } else {
-      Code* code = Code::GetCodeFromTargetAddress(info->target_address());
-      if (code->kind() == Code::HANDLER) {
-        if (!skip_next_handler) return code;
-        skip_next_handler = false;
-      }
-    }
-  }
-  return NULL;
-}
-
-
-bool Code::FindHandlers(CodeHandleList* code_list, int length) {
-  DCHECK(is_inline_cache_stub());
-  DisallowHeapAllocation no_allocation;
-  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
-             RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-  bool skip_next_handler = false;
-  int i = 0;
-  for (RelocIterator it(this, mask); !it.done(); it.next()) {
-    if (i == length) return true;
-    RelocInfo* info = it.rinfo();
-    if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
-      Object* obj = info->target_object();
-      skip_next_handler |= obj->IsWeakCell() && WeakCell::cast(obj)->cleared();
-    } else {
-      Code* code = Code::GetCodeFromTargetAddress(info->target_address());
-      // IC stubs with handlers never contain non-handler code objects before
-      // handler targets.
-      if (code->kind() != Code::HANDLER) break;
-      if (!skip_next_handler) {
-        code_list->Add(Handle<Code>(code));
-        i++;
-      }
-      skip_next_handler = false;
-    }
-  }
-  return i == length;
-}
-
-
-MaybeHandle<Code> Code::FindHandlerForMap(Map* map) {
-  DCHECK(is_inline_cache_stub());
-  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
-             RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-  bool return_next = false;
-  for (RelocIterator it(this, mask); !it.done(); it.next()) {
-    RelocInfo* info = it.rinfo();
-    if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
-      Object* object = info->target_object();
-      if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
-      if (object == map) return_next = true;
-    } else if (return_next) {
-      Code* code = Code::GetCodeFromTargetAddress(info->target_address());
-      DCHECK(code->kind() == Code::HANDLER);
-      return handle(code);
-    }
-  }
-  return MaybeHandle<Code>();
-}
-
-
-Name* Code::FindFirstName() {
-  DCHECK(is_inline_cache_stub());
-  DisallowHeapAllocation no_allocation;
-  int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-  for (RelocIterator it(this, mask); !it.done(); it.next()) {
-    RelocInfo* info = it.rinfo();
-    Object* object = info->target_object();
-    if (object->IsName()) return Name::cast(object);
-  }
-  return NULL;
-}
-
-
 void Code::ClearInlineCaches() {
-  ClearInlineCaches(NULL);
-}
-
-
-void Code::ClearInlineCaches(Code::Kind kind) {
-  ClearInlineCaches(&kind);
-}
-
-
-void Code::ClearInlineCaches(Code::Kind* kind) {
   int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
              RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
     if (target->is_inline_cache_stub()) {
-      if (kind == NULL || *kind == target->kind()) {
-        IC::Clear(this->GetIsolate(), info->pc(),
-                  info->host()->constant_pool());
-      }
+      IC::Clear(this->GetIsolate(), info->pc(), info->host()->constant_pool());
     }
   }
 }
@@ -14841,8 +13784,9 @@
     int pc_and_state = this->PcAndState(i)->value();
     os << std::setw(6) << this->AstId(i).ToInt() << "  " << std::setw(8)
        << FullCodeGenerator::PcField::decode(pc_and_state) << "  "
-       << FullCodeGenerator::State2String(
-              FullCodeGenerator::StateField::decode(pc_and_state)) << "\n";
+       << Deoptimizer::BailoutStateToString(
+              FullCodeGenerator::BailoutStateField::decode(pc_and_state))
+       << "\n";
   }
 }
 
@@ -14881,8 +13825,8 @@
     case UNINITIALIZED: return "UNINITIALIZED";
     case PREMONOMORPHIC: return "PREMONOMORPHIC";
     case MONOMORPHIC: return "MONOMORPHIC";
-    case PROTOTYPE_FAILURE:
-      return "PROTOTYPE_FAILURE";
+    case RECOMPUTE_HANDLER:
+      return "RECOMPUTE_HANDLER";
     case POLYMORPHIC: return "POLYMORPHIC";
     case MEGAMORPHIC: return "MEGAMORPHIC";
     case GENERIC: return "GENERIC";
@@ -14893,16 +13837,6 @@
 }
 
 
-const char* Code::StubType2String(StubType type) {
-  switch (type) {
-    case NORMAL: return "NORMAL";
-    case FAST: return "FAST";
-  }
-  UNREACHABLE();  // keep the compiler happy
-  return NULL;
-}
-
-
 void Code::PrintExtraICState(std::ostream& os,  // NOLINT
                              Kind kind, ExtraICState extra) {
   os << "extra_ic_state = ";
@@ -14924,9 +13858,6 @@
   if (is_inline_cache_stub()) {
     os << "ic_state = " << ICState2String(ic_state()) << "\n";
     PrintExtraICState(os, kind(), extra_ic_state());
-    if (ic_state() == MONOMORPHIC) {
-      os << "type = " << StubType2String(type()) << "\n";
-    }
     if (is_compare_ic_stub()) {
       DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC);
       CompareICStub stub(stub_key(), GetIsolate());
@@ -15103,7 +14034,6 @@
 void BytecodeArray::Disassemble(std::ostream& os) {
   os << "Parameter count " << parameter_count() << "\n";
   os << "Frame size " << frame_size() << "\n";
-  Vector<char> buf = Vector<char>::New(50);
 
   const uint8_t* base_address = GetFirstBytecodeAddress();
   interpreter::SourcePositionTableIterator source_positions(
@@ -15120,12 +14050,13 @@
       os << "         ";
     }
     const uint8_t* current_address = base_address + iterator.current_offset();
-    SNPrintF(buf, "%p", current_address);
-    os << buf.start() << " : ";
+    os << reinterpret_cast<const void*>(current_address) << " @ "
+       << std::setw(4) << iterator.current_offset() << " : ";
     interpreter::Bytecodes::Decode(os, current_address, parameter_count());
     if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
-      SNPrintF(buf, " (%p)", base_address + iterator.GetJumpTargetOffset());
-      os << buf.start();
+      const void* jump_target = base_address + iterator.GetJumpTargetOffset();
+      os << " (" << jump_target << " @ " << iterator.GetJumpTargetOffset()
+         << ")";
     }
     os << std::endl;
     iterator.Advance();
@@ -15158,29 +14089,6 @@
       array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
 }
 
-
-// Returns false if the passed-in index is marked non-configurable, which will
-// cause the truncation operation to halt, and thus no further old values need
-// be collected.
-static bool GetOldValue(Isolate* isolate,
-                        Handle<JSObject> object,
-                        uint32_t index,
-                        List<Handle<Object> >* old_values,
-                        List<uint32_t>* indices) {
-  LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
-  CHECK(JSReceiver::GetPropertyAttributes(&it).IsJust());
-  DCHECK(it.IsFound());
-  if (!it.IsConfigurable()) return false;
-  Handle<Object> value =
-      it.state() == LookupIterator::ACCESSOR
-          ? Handle<Object>::cast(isolate->factory()->the_hole_value())
-          : JSReceiver::GetDataProperty(&it);
-  old_values->Add(value);
-  indices->Add(index);
-  return true;
-}
-
-
 void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
   // We should never end in here with a pixel or external array.
   DCHECK(array->AllowsSetLength());
@@ -15191,91 +14099,6 @@
 }
 
 
-MaybeHandle<Object> JSArray::ObservableSetLength(Handle<JSArray> array,
-                                                 uint32_t new_length) {
-  if (!array->map()->is_observed()) {
-    SetLength(array, new_length);
-    return array;
-  }
-
-  Isolate* isolate = array->GetIsolate();
-  List<uint32_t> indices;
-  List<Handle<Object> > old_values;
-  Handle<Object> old_length_handle(array->length(), isolate);
-  uint32_t old_length = 0;
-  CHECK(old_length_handle->ToArrayLength(&old_length));
-
-  int num_elements = array->NumberOfOwnElements(ALL_PROPERTIES);
-  if (num_elements > 0) {
-    if (old_length == static_cast<uint32_t>(num_elements)) {
-      // Simple case for arrays without holes.
-      for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
-        if (!GetOldValue(isolate, array, i, &old_values, &indices)) break;
-      }
-    } else {
-      // For sparse arrays, only iterate over existing elements.
-      // TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
-      // the to-be-removed indices twice.
-      Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
-      array->GetOwnElementKeys(*keys, ALL_PROPERTIES);
-      while (num_elements-- > 0) {
-        uint32_t index = NumberToUint32(keys->get(num_elements));
-        if (index < new_length) break;
-        if (!GetOldValue(isolate, array, index, &old_values, &indices)) break;
-      }
-    }
-  }
-
-  SetLength(array, new_length);
-
-  CHECK(array->length()->ToArrayLength(&new_length));
-  if (old_length == new_length) return array;
-
-  RETURN_ON_EXCEPTION(isolate, BeginPerformSplice(array), Object);
-
-  for (int i = 0; i < indices.length(); ++i) {
-    // For deletions where the property was an accessor, old_values[i]
-    // will be the hole, which instructs EnqueueChangeRecord to elide
-    // the "oldValue" property.
-    RETURN_ON_EXCEPTION(
-        isolate,
-        JSObject::EnqueueChangeRecord(
-            array, "delete", isolate->factory()->Uint32ToString(indices[i]),
-            old_values[i]),
-        Object);
-  }
-
-  RETURN_ON_EXCEPTION(isolate,
-                      JSObject::EnqueueChangeRecord(
-                          array, "update", isolate->factory()->length_string(),
-                          old_length_handle),
-                      Object);
-
-  RETURN_ON_EXCEPTION(isolate, EndPerformSplice(array), Object);
-
-  uint32_t index = Min(old_length, new_length);
-  uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
-  uint32_t delete_count = new_length < old_length ? old_length - new_length : 0;
-  Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
-  if (delete_count > 0) {
-    for (int i = indices.length() - 1; i >= 0; i--) {
-      // Skip deletions where the property was an accessor, leaving holes
-      // in the array of old values.
-      if (old_values[i]->IsTheHole()) continue;
-      JSObject::AddDataElement(deleted, indices[i] - index, old_values[i], NONE)
-          .Assert();
-    }
-
-    JSArray::SetLength(deleted, delete_count);
-  }
-
-  RETURN_ON_EXCEPTION(
-      isolate, EnqueueSpliceRecord(array, index, deleted, add_count), Object);
-
-  return array;
-}
-
-
 // static
 void Map::AddDependentCode(Handle<Map> map,
                            DependentCode::DependencyGroup group,
@@ -15603,7 +14426,7 @@
                                   bool from_javascript,
                                   ShouldThrow should_throw) {
   Isolate* isolate = proxy->GetIsolate();
-  STACK_CHECK(Nothing<bool>());
+  STACK_CHECK(isolate, Nothing<bool>());
   Handle<Name> trap_name = isolate->factory()->setPrototypeOf_string();
   // 1. Assert: Either Type(V) is Object or Type(V) is Null.
   DCHECK(value->IsJSReceiver() || value->IsNull());
@@ -15674,46 +14497,10 @@
                                    ShouldThrow should_throw) {
   Isolate* isolate = object->GetIsolate();
 
-  const bool observed = from_javascript && object->map()->is_observed();
-  Handle<Object> old_value;
-  if (observed) {
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, old_value,
-                                     JSReceiver::GetPrototype(isolate, object),
-                                     Nothing<bool>());
-  }
-
-  Maybe<bool> result =
-      SetPrototypeUnobserved(object, value, from_javascript, should_throw);
-  MAYBE_RETURN(result, Nothing<bool>());
-
-  if (result.FromJust() && observed) {
-    Handle<Object> new_value;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, new_value,
-                                     JSReceiver::GetPrototype(isolate, object),
-                                     Nothing<bool>());
-    if (!new_value->SameValue(*old_value)) {
-      RETURN_ON_EXCEPTION_VALUE(
-          isolate, JSObject::EnqueueChangeRecord(
-                       object, "setPrototype",
-                       isolate->factory()->proto_string(), old_value),
-          Nothing<bool>());
-    }
-  }
-
-  return result;
-}
-
-
-Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
-                                             Handle<Object> value,
-                                             bool from_javascript,
-                                             ShouldThrow should_throw) {
 #ifdef DEBUG
   int size = object->Size();
 #endif
 
-  Isolate* isolate = object->GetIsolate();
-
   if (from_javascript) {
     if (object->IsAccessCheckNeeded() &&
         !isolate->MayAccess(handle(isolate->context()), object)) {
@@ -15949,12 +14736,8 @@
   uint32_t old_length = 0;
   uint32_t new_capacity = 0;
 
-  Handle<Object> old_length_handle;
   if (object->IsJSArray()) {
     CHECK(JSArray::cast(*object)->length()->ToArrayLength(&old_length));
-    if (object->map()->is_observed()) {
-      old_length_handle = handle(JSArray::cast(*object)->length(), isolate);
-    }
   }
 
   ElementsKind kind = object->GetElementsKind();
@@ -15998,38 +14781,6 @@
     JSArray::cast(*object)->set_length(*new_length_handle);
   }
 
-  if (!old_length_handle.is_null() && new_length != old_length) {
-    // |old_length_handle| is kept null above unless the object is observed.
-    DCHECK(object->map()->is_observed());
-    Handle<JSArray> array = Handle<JSArray>::cast(object);
-    Handle<String> name = isolate->factory()->Uint32ToString(index);
-
-    RETURN_ON_EXCEPTION_VALUE(isolate, BeginPerformSplice(array),
-                              Nothing<bool>());
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate, EnqueueChangeRecord(array, "add", name,
-                                     isolate->factory()->the_hole_value()),
-        Nothing<bool>());
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate, EnqueueChangeRecord(array, "update",
-                                     isolate->factory()->length_string(),
-                                     old_length_handle),
-        Nothing<bool>());
-    RETURN_ON_EXCEPTION_VALUE(isolate, EndPerformSplice(array),
-                              Nothing<bool>());
-    Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
-    RETURN_ON_EXCEPTION_VALUE(isolate,
-                              EnqueueSpliceRecord(array, old_length, deleted,
-                                                  new_length - old_length),
-                              Nothing<bool>());
-  } else if (object->map()->is_observed()) {
-    Handle<String> name = isolate->factory()->Uint32ToString(index);
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate, EnqueueChangeRecord(object, "add", name,
-                                     isolate->factory()->the_hole_value()),
-        Nothing<bool>());
-  }
-
   return Just(true);
 }
 
@@ -16520,188 +15271,25 @@
   }
 }
 
-void JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
-                                       PropertyFilter filter) {
-  if (HasFastProperties()) {
-    int real_size = map()->NumberOfOwnDescriptors();
-    Handle<DescriptorArray> descs(map()->instance_descriptors());
-    for (int i = 0; i < real_size; i++) {
-      PropertyDetails details = descs->GetDetails(i);
-      if ((details.attributes() & filter) != 0) continue;
-      if (filter & ONLY_ALL_CAN_READ) {
-        if (details.kind() != kAccessor) continue;
-        Object* accessors = descs->GetValue(i);
-        if (!accessors->IsAccessorInfo()) continue;
-        if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
-      }
-      Name* key = descs->GetKey(i);
-      if (key->FilterKey(filter)) continue;
-      keys->AddKey(key, DO_NOT_CONVERT);
-    }
-  } else if (IsJSGlobalObject()) {
-    GlobalDictionary::CollectKeysTo(handle(global_dictionary()), keys, filter);
-  } else {
-    NameDictionary::CollectKeysTo(handle(property_dictionary()), keys, filter);
-  }
-}
-
-
-int JSObject::NumberOfOwnElements(PropertyFilter filter) {
-  // Fast case for objects with no elements.
-  if (!IsJSValue() && HasFastElements()) {
-    uint32_t length =
-        IsJSArray()
-            ? static_cast<uint32_t>(
-                  Smi::cast(JSArray::cast(this)->length())->value())
-            : static_cast<uint32_t>(FixedArrayBase::cast(elements())->length());
-    if (length == 0) return 0;
-  }
-  // Compute the number of enumerable elements.
-  return GetOwnElementKeys(NULL, filter);
-}
-
-void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
-                                     KeyAccumulator* keys,
-                                     PropertyFilter filter) {
-  if (filter & SKIP_STRINGS) return;
-  ElementsAccessor* accessor = object->GetElementsAccessor();
-  accessor->CollectElementIndices(object, keys, kMaxUInt32, filter, 0);
-}
-
-
-int JSObject::GetOwnElementKeys(FixedArray* storage, PropertyFilter filter) {
-  int counter = 0;
-
-  // If this is a String wrapper, add the string indices first,
-  // as they're guaranteed to precede the elements in numerical order
-  // and ascending order is required by ECMA-262, 6th, 9.1.12.
-  if (IsJSValue()) {
-    Object* val = JSValue::cast(this)->value();
-    if (val->IsString()) {
-      String* str = String::cast(val);
-      if (storage) {
-        for (int i = 0; i < str->length(); i++) {
-          storage->set(counter + i, Smi::FromInt(i));
-        }
-      }
-      counter += str->length();
+bool JSObject::WasConstructedFromApiFunction() {
+  auto instance_type = map()->instance_type();
+  bool is_api_object = instance_type == JS_API_OBJECT_TYPE ||
+                       instance_type == JS_SPECIAL_API_OBJECT_TYPE;
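+  // In slow-DCHECK builds, cross-check the instance type against whether the
+  // map's constructor really is an API function.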
+#ifdef ENABLE_SLOW_DCHECKS
+  if (FLAG_enable_slow_asserts) {
+    Object* maybe_constructor = map()->GetConstructor();
+    if (!maybe_constructor->IsJSFunction()) return false;
+    JSFunction* constructor = JSFunction::cast(maybe_constructor);
+    if (constructor->shared()->IsApiFunction()) {
+      DCHECK(is_api_object);
+    } else {
+      DCHECK(!is_api_object);
     }
   }
-
-  switch (GetElementsKind()) {
-    case FAST_SMI_ELEMENTS:
-    case FAST_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_STRING_WRAPPER_ELEMENTS: {
-      int length = IsJSArray() ?
-          Smi::cast(JSArray::cast(this)->length())->value() :
-          FixedArray::cast(elements())->length();
-      for (int i = 0; i < length; i++) {
-        if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
-          if (storage != NULL) {
-            storage->set(counter, Smi::FromInt(i));
-          }
-          counter++;
-        }
-      }
-      DCHECK(!storage || storage->length() >= counter);
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS: {
-      int length = IsJSArray() ?
-          Smi::cast(JSArray::cast(this)->length())->value() :
-          FixedArrayBase::cast(elements())->length();
-      for (int i = 0; i < length; i++) {
-        if (!FixedDoubleArray::cast(elements())->is_the_hole(i)) {
-          if (storage != NULL) {
-            storage->set(counter, Smi::FromInt(i));
-          }
-          counter++;
-        }
-      }
-      DCHECK(!storage || storage->length() >= counter);
-      break;
-    }
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                      \
-    case TYPE##_ELEMENTS:                                                    \
-
-    TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-    {
-      int length = FixedArrayBase::cast(elements())->length();
-      while (counter < length) {
-        if (storage != NULL) {
-          storage->set(counter, Smi::FromInt(counter));
-        }
-        counter++;
-      }
-      DCHECK(!storage || storage->length() >= counter);
-      break;
-    }
-
-    case DICTIONARY_ELEMENTS:
-    case SLOW_STRING_WRAPPER_ELEMENTS: {
-      if (storage != NULL) {
-        element_dictionary()->CopyKeysTo(storage, counter, filter,
-                                         SeededNumberDictionary::SORTED);
-      }
-      counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
-      break;
-    }
-    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
-    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
-      FixedArray* parameter_map = FixedArray::cast(elements());
-      int mapped_length = parameter_map->length() - 2;
-      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-      if (arguments->IsDictionary()) {
-        // Copy the keys from arguments first, because Dictionary::CopyKeysTo
-        // will insert in storage starting at index 0.
-        SeededNumberDictionary* dictionary =
-            SeededNumberDictionary::cast(arguments);
-        if (storage != NULL) {
-          dictionary->CopyKeysTo(storage, counter, filter,
-                                 SeededNumberDictionary::UNSORTED);
-        }
-        counter += dictionary->NumberOfElementsFilterAttributes(filter);
-        for (int i = 0; i < mapped_length; ++i) {
-          if (!parameter_map->get(i + 2)->IsTheHole()) {
-            if (storage != NULL) storage->set(counter, Smi::FromInt(i));
-            ++counter;
-          }
-        }
-        if (storage != NULL) storage->SortPairs(storage, counter);
-
-      } else {
-        int backing_length = arguments->length();
-        int i = 0;
-        for (; i < mapped_length; ++i) {
-          if (!parameter_map->get(i + 2)->IsTheHole()) {
-            if (storage != NULL) storage->set(counter, Smi::FromInt(i));
-            ++counter;
-          } else if (i < backing_length && !arguments->get(i)->IsTheHole()) {
-            if (storage != NULL) storage->set(counter, Smi::FromInt(i));
-            ++counter;
-          }
-        }
-        for (; i < backing_length; ++i) {
-          if (storage != NULL) storage->set(counter, Smi::FromInt(i));
-          ++counter;
-        }
-      }
-      break;
-    }
-    case NO_ELEMENTS:
-      break;
-  }
-
-  DCHECK(!storage || storage->length() == counter);
-  return counter;
+#endif
+  return is_api_object;
 }
 
-
 MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
                                                 Handle<Object> object) {
   if (object->IsUndefined()) return isolate->factory()->undefined_to_string();
@@ -16900,22 +15488,6 @@
 
 
 // static
-MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern,
-                                    Handle<String> flags_string) {
-  Isolate* isolate = pattern->GetIsolate();
-  bool success = false;
-  Flags flags = RegExpFlagsFromString(flags_string, &success);
-  if (!success) {
-    THROW_NEW_ERROR(
-        isolate,
-        NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
-        JSRegExp);
-  }
-  return New(pattern, flags);
-}
-
-
-// static
 Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
   Isolate* const isolate = regexp->GetIsolate();
   return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
@@ -17006,6 +15578,9 @@
   ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
                              EscapeRegExpSource(isolate, source), JSRegExp);
 
+  RETURN_ON_EXCEPTION(isolate, RegExpImpl::Compile(regexp, source, flags),
+                      JSRegExp);
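+  // Compiling before any fields are updated means a thrown compilation error
+  // leaves the regexp object untouched.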
+
   regexp->set_source(*escaped_source);
   regexp->set_flags(Smi::FromInt(flags));
 
@@ -17026,9 +15601,6 @@
         .Check();
   }
 
-  RETURN_ON_EXCEPTION(isolate, RegExpImpl::Compile(regexp, source, flags),
-                      JSRegExp);
-
   return regexp;
 }
 
@@ -17238,10 +15810,13 @@
 
   // Rehash the elements.
   int capacity = this->Capacity();
+  Heap* heap = new_table->GetHeap();
+  Object* the_hole = heap->the_hole_value();
+  Object* undefined = heap->undefined_value();
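+  // Cache the hole and undefined roots so the per-entry liveness check below
+  // is a pair of raw pointer comparisons.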
   for (int i = 0; i < capacity; i++) {
     uint32_t from_index = EntryToIndex(i);
     Object* k = this->get(from_index);
-    if (IsKey(k)) {
+    if (k != the_hole && k != undefined) {
       uint32_t hash = this->HashForObject(key, k);
       uint32_t insertion_index =
           EntryToIndex(new_table->FindInsertionEntry(hash));
@@ -17415,9 +15990,12 @@
   uint32_t entry = FirstProbe(hash, capacity);
   uint32_t count = 1;
   // EnsureCapacity will guarantee the hash table is never full.
+  Heap* heap = GetHeap();
+  Object* the_hole = heap->the_hole_value();
+  Object* undefined = heap->undefined_value();
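+  // Raw pointer comparisons against the cached roots replace the
+  // IsUndefined()/IsTheHole() checks in this hot probe loop.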
   while (true) {
     Object* element = KeyAt(entry);
-    if (element->IsUndefined() || element->IsTheHole()) break;
+    if (element == the_hole || element == undefined) break;
     entry = NextProbe(entry, count++, capacity);
   }
   return entry;
@@ -17564,6 +16142,30 @@
 template int NameDictionaryBase<NameDictionary, NameDictionaryShape>::FindEntry(
     Handle<Name>);
 
+template int Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
+    NumberOfElementsFilterAttributes(PropertyFilter filter);
+
+template int Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::
+    NumberOfElementsFilterAttributes(PropertyFilter filter);
+
+template void Dictionary<GlobalDictionary, GlobalDictionaryShape,
+                         Handle<Name>>::CopyEnumKeysTo(FixedArray* storage);
+
+template void Dictionary<NameDictionary, NameDictionaryShape,
+                         Handle<Name>>::CopyEnumKeysTo(FixedArray* storage);
+
+template void
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
+    CollectKeysTo(Handle<Dictionary<GlobalDictionary, GlobalDictionaryShape,
+                                    Handle<Name>>>
+                      dictionary,
+                  KeyAccumulator* keys, PropertyFilter filter);
+
+template void
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CollectKeysTo(
+    Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
+        dictionary,
+    KeyAccumulator* keys, PropertyFilter filter);
 
 Handle<Object> JSObject::PrepareSlowElementsForSort(
     Handle<JSObject> object, uint32_t limit) {
@@ -17660,8 +16262,7 @@
 Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
                                                 uint32_t limit) {
   Isolate* isolate = object->GetIsolate();
-  if (object->HasSloppyArgumentsElements() ||
-      object->map()->is_observed()) {
+  if (object->HasSloppyArgumentsElements()) {
     return handle(Smi::FromInt(-1), isolate);
   }
 
@@ -17991,6 +16592,16 @@
     Handle<ConsString> cons = Handle<ConsString>::cast(string);
     cons->set_first(*result);
     cons->set_second(isolate->heap()->empty_string());
+  } else if (string->IsSlicedString()) {
+    STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
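+    // Because the sizes match, the SlicedString can be overwritten in place
+    // with a ConsString pointing at the flattened result.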
+    DisallowHeapAllocation no_gc;
+    bool one_byte = result->IsOneByteRepresentation();
+    Handle<Map> map = one_byte ? isolate->factory()->cons_one_byte_string_map()
+                               : isolate->factory()->cons_string_map();
+    string->set_map(*map);
+    Handle<ConsString> cons = Handle<ConsString>::cast(string);
+    cons->set_first(*result);
+    cons->set_second(isolate->heap()->empty_string());
   }
   return result;
 }
@@ -18097,23 +16708,11 @@
   Isolate* isolate = cache->GetIsolate();
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   StringSharedKey key(src, shared, language_mode, RelocInfo::kNoPosition);
-  {
-    Handle<Object> k = key.AsHandle(isolate);
-    DisallowHeapAllocation no_allocation_scope;
-    int entry = cache->FindEntry(&key);
-    if (entry != kNotFound) {
-      cache->set(EntryToIndex(entry), *k);
-      cache->set(EntryToIndex(entry) + 1, *value);
-      return cache;
-    }
-  }
-
+  Handle<Object> k = key.AsHandle(isolate);
   cache = EnsureCapacity(cache, 1, &key);
   int entry = cache->FindInsertionEntry(key.Hash());
-  Handle<Object> k =
-      isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
   cache->set(EntryToIndex(entry), *k);
-  cache->set(EntryToIndex(entry) + 1, Smi::FromInt(kHashGenerations));
+  cache->set(EntryToIndex(entry) + 1, *value);
   cache->ElementAdded();
   return cache;
 }
@@ -18542,30 +17141,6 @@
   }
 }
 
-
-template <typename Derived, typename Shape, typename Key>
-int Dictionary<Derived, Shape, Key>::CopyKeysTo(
-    FixedArray* storage, int index, PropertyFilter filter,
-    typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
-  DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
-  int start_index = index;
-  int capacity = this->Capacity();
-  for (int i = 0; i < capacity; i++) {
-    Object* k = this->KeyAt(i);
-    if (!this->IsKey(k) || k->FilterKey(filter)) continue;
-    if (this->IsDeleted(i)) continue;
-    PropertyDetails details = this->DetailsAt(i);
-    PropertyAttributes attr = details.attributes();
-    if ((attr & filter) != 0) continue;
-    storage->set(index++, k);
-  }
-  if (sort_mode == Dictionary::SORTED) {
-    storage->SortPairs(storage, index);
-  }
-  DCHECK(storage->length() >= index);
-  return index - start_index;
-}
-
 template <typename Derived, typename Shape, typename Key>
 void Dictionary<Derived, Shape, Key>::CollectKeysTo(
     Handle<Dictionary<Derived, Shape, Key> > dictionary, KeyAccumulator* keys,
@@ -18690,7 +17265,7 @@
     return table;
   }
 
-  // Rehash if more than 25% of the entries are deleted entries.
+  // Rehash if more than 33% of the entries are deleted entries.
   // TODO(jochen): Consider shrinking the fixed array in place.
   if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
     table->Rehash(isolate->factory()->undefined_value());
@@ -18907,21 +17482,23 @@
 template<class Derived, class Iterator, int entrysize>
 Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Rehash(
     Handle<Derived> table, int new_capacity) {
+  Isolate* isolate = table->GetIsolate();
+  Heap* heap = isolate->heap();
   DCHECK(!table->IsObsolete());
 
-  Handle<Derived> new_table =
-      Allocate(table->GetIsolate(),
-               new_capacity,
-               table->GetHeap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+  Handle<Derived> new_table = Allocate(
+      isolate, new_capacity, heap->InNewSpace(*table) ? NOT_TENURED : TENURED);
   int nof = table->NumberOfElements();
   int nod = table->NumberOfDeletedElements();
   int new_buckets = new_table->NumberOfBuckets();
   int new_entry = 0;
   int removed_holes_index = 0;
 
+  DisallowHeapAllocation no_gc;
+  Object* the_hole = heap->the_hole_value();
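+  // Deleted entries are marked with the hole; record their indices so that
+  // iterators over the now-obsolete table can be adjusted.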
   for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
     Object* key = table->KeyAt(old_entry);
-    if (key->IsTheHole()) {
+    if (key == the_hole) {
       table->SetRemovedIndexAt(removed_holes_index++, old_entry);
       continue;
     }
diff --git a/src/objects.h b/src/objects.h
index cbc9c04..15d2d72 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -309,7 +309,7 @@
 
 // We may store the unsigned bit field as a signed Smi value and do not
 // use the sign bit.
-const int kStubMajorKeyBits = 7;
+const int kStubMajorKeyBits = 8;
 const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
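+// E.g. with a 31-bit Smi payload this leaves 31 - 8 - 1 = 22 minor key bits.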
 
 // All Maps have a field instance_type containing a InstanceType.
@@ -398,8 +398,6 @@
   V(ALLOCATION_MEMENTO_TYPE)                                    \
   V(ALLOCATION_SITE_TYPE)                                       \
   V(SCRIPT_TYPE)                                                \
-  V(CODE_CACHE_TYPE)                                            \
-  V(POLYMORPHIC_CODE_CACHE_TYPE)                                \
   V(TYPE_FEEDBACK_INFO_TYPE)                                    \
   V(ALIASED_ARGUMENTS_ENTRY_TYPE)                               \
   V(BOX_TYPE)                                                   \
@@ -422,6 +420,7 @@
   V(JS_MODULE_TYPE)                                             \
   V(JS_GLOBAL_OBJECT_TYPE)                                      \
   V(JS_GLOBAL_PROXY_TYPE)                                       \
+  V(JS_API_OBJECT_TYPE)                                         \
   V(JS_SPECIAL_API_OBJECT_TYPE)                                 \
   V(JS_ARRAY_TYPE)                                              \
   V(JS_ARRAY_BUFFER_TYPE)                                       \
@@ -514,8 +513,6 @@
   V(SCRIPT, Script, script)                                                  \
   V(ALLOCATION_SITE, AllocationSite, allocation_site)                        \
   V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento)               \
-  V(CODE_CACHE, CodeCache, code_cache)                                       \
-  V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)    \
   V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info)                \
   V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
   V(DEBUG_INFO, DebugInfo, debug_info)                                       \
@@ -685,8 +682,6 @@
   ALLOCATION_SITE_TYPE,
   ALLOCATION_MEMENTO_TYPE,
   SCRIPT_TYPE,
-  CODE_CACHE_TYPE,
-  POLYMORPHIC_CODE_CACHE_TYPE,
   TYPE_FEEDBACK_INFO_TYPE,
   ALIASED_ARGUMENTS_ENTRY_TYPE,
   BOX_TYPE,
@@ -708,11 +703,14 @@
   JS_PROXY_TYPE,          // FIRST_JS_RECEIVER_TYPE
   JS_GLOBAL_OBJECT_TYPE,  // FIRST_JS_OBJECT_TYPE
   JS_GLOBAL_PROXY_TYPE,
-  // Like JS_OBJECT_TYPE, but requires access checks and/or has interceptors.
+  // Like JS_API_OBJECT_TYPE, but requires access checks and/or has
+  // interceptors.
   JS_SPECIAL_API_OBJECT_TYPE,  // LAST_SPECIAL_RECEIVER_TYPE
   JS_VALUE_TYPE,               // LAST_CUSTOM_ELEMENTS_RECEIVER
   JS_MESSAGE_OBJECT_TYPE,
   JS_DATE_TYPE,
+  // Like JS_OBJECT_TYPE, but created from an API function.
+  JS_API_OBJECT_TYPE,
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GENERATOR_OBJECT_TYPE,
@@ -768,6 +766,7 @@
 };
 
 STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
+STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
 STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
 STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
 STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
@@ -981,7 +980,6 @@
   V(NormalizedMapCache)            \
   V(CompilationCacheTable)         \
   V(CodeCacheHashTable)            \
-  V(PolymorphicCodeCacheHashTable) \
   V(MapCache)                      \
   V(JSGlobalObject)                \
   V(JSGlobalProxy)                 \
@@ -1011,7 +1009,8 @@
   V(True)               \
   V(False)              \
   V(ArgumentsMarker)    \
-  V(OptimizedOut)
+  V(OptimizedOut)       \
+  V(StaleRegister)
 
 // The element types selection for CreateListFromArrayLike.
 enum class ElementTypes { kAll, kStringAndSymbol };
@@ -1162,6 +1161,10 @@
   MUST_USE_RESULT static MaybeHandle<String> ToString(Isolate* isolate,
                                                       Handle<Object> input);
 
+  // ES6 section 7.1.14 ToPropertyKey
+  MUST_USE_RESULT static MaybeHandle<Object> ToPropertyKey(
+      Isolate* isolate, Handle<Object> value);
+
   // ES6 section 7.1.15 ToLength
   MUST_USE_RESULT static MaybeHandle<Object> ToLength(Isolate* isolate,
                                                       Handle<Object> input);
@@ -1230,6 +1233,14 @@
                                                         Handle<Object> lhs,
                                                         Handle<Object> rhs);
 
+  // ES6 section 7.3.19 OrdinaryHasInstance (C, O).
+  MUST_USE_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
+      Isolate* isolate, Handle<Object> callable, Handle<Object> object);
+
+  // ES6 section 12.10.4 Runtime Semantics: InstanceofOperator(O, C)
+  MUST_USE_RESULT static MaybeHandle<Object> InstanceOf(
+      Isolate* isolate, Handle<Object> object, Handle<Object> callable);
+
   MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
 
   // ES6 [[Set]] (when passed DONT_THROW)
@@ -1476,7 +1487,7 @@
   // True if this map word is a forwarding address for a scavenge
   // collection.  Only valid during a scavenge collection (specifically,
   // when all map words are heap object pointers, i.e. not during a full GC).
-  inline bool IsForwardingAddress();
+  inline bool IsForwardingAddress() const;
 
   // Create a map word from a forwarding address.
   static inline MapWord FromForwardingAddress(HeapObject* object);
@@ -1832,6 +1843,8 @@
 
   MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
       Handle<JSReceiver> object, Handle<Name> name);
+  MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
+      Handle<JSReceiver> object, uint32_t index);
 
   MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
       Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
@@ -1912,9 +1925,6 @@
 
   MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSReceiver> object);
 
-  // Tests for the fast common case for property enumeration.
-  bool IsSimpleEnum();
-
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
 
@@ -1932,6 +1942,8 @@
       Handle<JSReceiver> object, Handle<Name> name);
   MUST_USE_RESULT static inline Maybe<PropertyAttributes>
   GetOwnPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
+  MUST_USE_RESULT static inline Maybe<PropertyAttributes>
+  GetOwnPropertyAttributes(Handle<JSReceiver> object, uint32_t index);
 
   MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetElementAttributes(
       Handle<JSReceiver> object, uint32_t index);
@@ -2184,13 +2196,6 @@
   MUST_USE_RESULT static Maybe<PropertyAttributes>
       GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
 
-  // Retrieves an AccessorPair property from the given object. Might return
-  // undefined if the property doesn't exist or is of a different kind.
-  MUST_USE_RESULT static MaybeHandle<Object> GetAccessor(
-      Handle<JSObject> object,
-      Handle<Name> name,
-      AccessorComponent component);
-
   // Defines an AccessorPair property on the given object.
   // TODO(mstarzinger): Rename to SetAccessor().
   static MaybeHandle<Object> DefineAccessor(Handle<JSObject> object,
@@ -2214,30 +2219,6 @@
   MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
       LookupIterator* it, bool* done);
 
-  // Accessors for hidden properties object.
-  //
-  // Hidden properties are not own properties of the object itself.  Instead
-  // they are stored in an auxiliary structure kept as an own property with a
-  // special name Heap::hidden_properties_symbol(). But if the receiver is a
-  // JSGlobalProxy then the auxiliary object is a property of its prototype, and
-  // if it's a detached proxy, then you can't have hidden properties.
-
-  // Sets a hidden property on this object. Returns this object if successful,
-  // undefined if called on a detached proxy.
-  static Handle<Object> SetHiddenProperty(Handle<JSObject> object,
-                                          Handle<Name> key,
-                                          Handle<Object> value);
-  // Gets the value of a hidden property with the given key. Returns the hole
-  // if the property doesn't exist (or if called on a detached proxy),
-  // otherwise returns the value set for the key.
-  Object* GetHiddenProperty(Handle<Name> key);
-  // Deletes a hidden property. Deleting a non-existing property is
-  // considered successful.
-  static void DeleteHiddenProperty(Handle<JSObject> object,
-                                   Handle<Name> key);
-  // Returns true if the object has a property with the hidden string as name.
-  static bool HasHiddenProperties(Handle<JSObject> object);
-
   static void ValidateElements(Handle<JSObject> object);
 
   // Makes sure that this object can contain HeapObject as elements.
@@ -2299,27 +2280,7 @@
   inline Object* GetInternalField(int index);
   inline void SetInternalField(int index, Object* value);
   inline void SetInternalField(int index, Smi* value);
-
-  void CollectOwnPropertyNames(KeyAccumulator* keys,
-                               PropertyFilter filter = ALL_PROPERTIES);
-
-  // Returns the number of properties on this object filtering out properties
-  // with the specified attributes (ignoring interceptors).
-  // TODO(jkummerow): Deprecated, only used by Object.observe.
-  int NumberOfOwnElements(PropertyFilter filter);
-  // Returns the number of elements on this object filtering out elements
-  // with the specified attributes (ignoring interceptors).
-  // TODO(jkummerow): Deprecated, only used by Object.observe.
-  int GetOwnElementKeys(FixedArray* storage, PropertyFilter filter);
-
-  static void CollectOwnElementKeys(Handle<JSObject> object,
-                                    KeyAccumulator* keys,
-                                    PropertyFilter filter);
-
-  static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
-
-  static Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
-                                                    Handle<JSObject> object);
+  bool WasConstructedFromApiFunction();
 
   // Returns a new map with all transitions dropped from the object's current
   // map and the ElementsKind set.
@@ -2400,9 +2361,6 @@
 
   static bool IsExtensible(Handle<JSObject> object);
 
-  // Called the first time an object is observed with ES7 Object.observe.
-  static void SetObserved(Handle<JSObject> object);
-
   // Copy object.
   enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
 
@@ -2503,11 +2461,6 @@
 
   typedef FlexibleBodyDescriptor<JSReceiver::kPropertiesOffset> BodyDescriptor;
 
-  // Enqueue change record for Object.observe. May cause GC.
-  MUST_USE_RESULT static MaybeHandle<Object> EnqueueChangeRecord(
-      Handle<JSObject> object, const char* type, Handle<Name> name,
-      Handle<Object> old_value);
-
   // Gets the number of currently used elements.
   int GetFastElementsUsage();
 
@@ -2538,21 +2491,6 @@
                                     ElementsKind kind,
                                     Object* object);
 
-  // Return the hash table backing store or the inline stored identity hash,
-  // whatever is found.
-  MUST_USE_RESULT Object* GetHiddenPropertiesHashTable();
-
-  // Return the hash table backing store for hidden properties.  If there is no
-  // backing store, allocate one.
-  static Handle<ObjectHashTable> GetOrCreateHiddenPropertiesHashtable(
-      Handle<JSObject> object);
-
-  // Set the hidden property backing store to either a hash table or
-  // the inline-stored identity hash.
-  static Handle<Object> SetHiddenPropertiesHashTable(
-      Handle<JSObject> object,
-      Handle<Object> value);
-
   static Handle<Object> GetIdentityHash(Isolate* isolate,
                                         Handle<JSObject> object);
 
@@ -2564,10 +2502,6 @@
   MUST_USE_RESULT static Maybe<bool> PreventExtensionsWithTransition(
       Handle<JSObject> object, ShouldThrow should_throw);
 
-  MUST_USE_RESULT static Maybe<bool> SetPrototypeUnobserved(
-      Handle<JSObject> object, Handle<Object> value, bool from_javascript,
-      ShouldThrow should_throw);
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
 };
 
@@ -3101,6 +3035,11 @@
     return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorKey;
   }
 
+  static int ToValueIndex(int descriptor_number) {
+    return kFirstIndex + (descriptor_number * kDescriptorSize) +
+           kDescriptorValue;
+  }
+
  private:
   // An entry in a DescriptorArray, represented as an (array, index) pair.
   class Entry {
@@ -3116,12 +3055,6 @@
     int index_;
   };
 
-  static int ToValueIndex(int descriptor_number) {
-    return kFirstIndex +
-           (descriptor_number * kDescriptorSize) +
-           kDescriptorValue;
-  }
-
   // Transfer a complete descriptor from the src descriptor array to this
   // descriptor array.
   void CopyFrom(int index, DescriptorArray* src);
@@ -3507,22 +3440,15 @@
 
   // Returns the number of elements in the dictionary filtering out properties
   // with the specified attributes.
-  // TODO(jkummerow): Deprecated, only used by Object.observe.
   int NumberOfElementsFilterAttributes(PropertyFilter filter);
 
   // Returns the number of enumerable elements in the dictionary.
-  // TODO(jkummerow): Deprecated, only used by Object.observe.
   int NumberOfEnumElements() {
     return NumberOfElementsFilterAttributes(ENUMERABLE_STRINGS);
   }
 
   enum SortMode { UNSORTED, SORTED };
 
-  // Fill in details for properties into storage.
-  // Returns the number of properties added.
-  // TODO(jkummerow): Deprecated, only used by Object.observe.
-  int CopyKeysTo(FixedArray* storage, int index, PropertyFilter filter,
-                 SortMode sort_mode);
   // Collect the keys into the given KeyAccumulator, in ascending chronological
   // order of property creation.
   static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key> > dictionary,
@@ -3911,7 +3837,7 @@
   static Handle<Derived> Shrink(Handle<Derived> table);
 
   // Returns a new empty OrderedHashTable and records the clearing so that
-  // exisiting iterators can be updated.
+  // existing iterators can be updated.
   static Handle<Derived> Clear(Handle<Derived> table);
 
   // Returns true if the OrderedHashTable contains the key.
@@ -3925,6 +3851,8 @@
     return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
   }
 
+  // Returns the number of contiguous entries in the data table, starting at 0,
+  // that either are real entries or have been deleted.
   int UsedCapacity() { return NumberOfElements() + NumberOfDeletedElements(); }
 
   int NumberOfBuckets() {
@@ -3956,7 +3884,11 @@
     return Smi::cast(next_entry)->value();
   }
 
-  Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
+  // Use KeyAt(entry)->IsTheHole() to determine whether an entry is deleted.
+  Object* KeyAt(int entry) {
+    DCHECK_LT(entry, this->UsedCapacity());
+    return get(EntryToIndex(entry));
+  }
 
   bool IsObsolete() {
     return !get(kNextTableIndex)->IsSmi();
@@ -4017,6 +3949,7 @@
     set(kNumberOfDeletedElementsIndex, Smi::FromInt(num));
   }
 
+  // Returns the number of elements that can fit into the allocated buffer.
   int Capacity() {
     return NumberOfBuckets() * kLoadFactor;
   }
@@ -4212,7 +4145,7 @@
 
   // Return true if a variable with the given name was introduced by the
   // compiler and should not be exposed to the user in a debugger.
-  bool LocalIsSynthetic(int var);
+  static bool VariableIsSynthetic(String* name);
 
   // Lookup support for serialized scope info. Returns the
   // the stack slot index for a given slot name if the slot is
@@ -4374,7 +4307,7 @@
   class HasSimpleParametersField
       : public BitField<bool, AsmFunctionField::kNext, 1> {};
   class FunctionKindField
-      : public BitField<FunctionKind, HasSimpleParametersField::kNext, 8> {};
+      : public BitField<FunctionKind, HasSimpleParametersField::kNext, 9> {};
 
   // BitFields representing the encoded information for context locals in the
   // ContextLocalInfoEntries part.
@@ -4426,8 +4359,13 @@
   inline byte get(int index);
   inline void set(int index, byte value);
 
+  // Copy in / copy out whole byte slices.
+  inline void copy_out(int index, byte* buffer, int length);
+  inline void copy_in(int index, const byte* buffer, int length);
+
   // Treat contents as an int array.
   inline int get_int(int index);
+  inline void set_int(int index, int value);
 
   static int SizeFor(int length) {
     return OBJECT_POINTER_ALIGN(kHeaderSize + length);
@@ -4949,18 +4887,11 @@
 
   static const char* Kind2String(Kind kind);
 
-  // Types of stubs.
-  enum StubType {
-    NORMAL,
-    FAST
-  };
-
   static const int kPrologueOffsetNotSet = -1;
 
 #ifdef ENABLE_DISASSEMBLER
   // Printing
   static const char* ICState2String(InlineCacheState state);
-  static const char* StubType2String(StubType type);
   static void PrintExtraICState(std::ostream& os,  // NOLINT
                                 Kind kind, ExtraICState extra);
   void Disassemble(const char* name, std::ostream& os);  // NOLINT
@@ -5030,21 +4961,14 @@
   inline InlineCacheState ic_state();  // Only valid for IC stubs.
   inline ExtraICState extra_ic_state();  // Only valid for IC stubs.
 
-  inline StubType type();  // Only valid for monomorphic IC stubs.
-
   // Testers for IC stub kinds.
   inline bool is_inline_cache_stub();
   inline bool is_debug_stub();
   inline bool is_handler();
-  inline bool is_load_stub();
-  inline bool is_keyed_load_stub();
-  inline bool is_store_stub();
-  inline bool is_keyed_store_stub();
   inline bool is_call_stub();
   inline bool is_binary_op_stub();
   inline bool is_compare_ic_stub();
   inline bool is_to_boolean_ic_stub();
-  inline bool is_keyed_stub();
   inline bool is_optimized_code();
   inline bool is_wasm_code();
   inline bool embeds_maps_weakly();
@@ -5151,20 +5075,6 @@
 
   // Find the first map in an IC stub.
   Map* FindFirstMap();
-  void FindAllMaps(MapHandleList* maps);
-
-  // Find the first handler in an IC stub.
-  Code* FindFirstHandler();
-
-  // Find |length| handlers and put them into |code_list|. Returns false if not
-  // enough handlers can be found.
-  bool FindHandlers(CodeHandleList* code_list, int length = -1);
-
-  // Find the handler for |map|.
-  MaybeHandle<Code> FindHandlerForMap(Map* map);
-
-  // Find the first name in an IC stub.
-  Name* FindFirstName();
 
   class FindAndReplacePattern;
   // For each (map-to-find, object-to-replace) pair in the pattern, this
@@ -5187,25 +5097,22 @@
   // Flags operations.
   static inline Flags ComputeFlags(
       Kind kind, InlineCacheState ic_state = UNINITIALIZED,
-      ExtraICState extra_ic_state = kNoExtraICState, StubType type = NORMAL,
+      ExtraICState extra_ic_state = kNoExtraICState,
       CacheHolderFlag holder = kCacheOnReceiver);
 
   static inline Flags ComputeMonomorphicFlags(
       Kind kind, ExtraICState extra_ic_state = kNoExtraICState,
-      CacheHolderFlag holder = kCacheOnReceiver, StubType type = NORMAL);
-
-  static inline Flags ComputeHandlerFlags(
-      Kind handler_kind, StubType type = NORMAL,
       CacheHolderFlag holder = kCacheOnReceiver);
 
+  static inline Flags ComputeHandlerFlags(
+      Kind handler_kind, CacheHolderFlag holder = kCacheOnReceiver);
+
   static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
-  static inline StubType ExtractTypeFromFlags(Flags flags);
   static inline CacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
   static inline Kind ExtractKindFromFlags(Flags flags);
   static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
 
-  static inline Flags RemoveTypeFromFlags(Flags flags);
-  static inline Flags RemoveTypeAndHolderFromFlags(Flags flags);
+  static inline Flags RemoveHolderFromFlags(Flags flags);
 
   // Convert a target address into a code object.
   static inline Code* GetCodeFromTargetAddress(Address address);
@@ -5261,7 +5168,6 @@
   DECLARE_VERIFIER(Code)
 
   void ClearInlineCaches();
-  void ClearInlineCaches(Kind kind);
 
   BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
   uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
@@ -5370,12 +5276,11 @@
 
   // Flags layout.  BitField<type, shift, size>.
   class ICStateField : public BitField<InlineCacheState, 0, 3> {};
-  class TypeField : public BitField<StubType, 3, 1> {};
-  class CacheHolderField : public BitField<CacheHolderFlag, 4, 2> {};
-  class KindField : public BitField<Kind, 6, 5> {};
+  class CacheHolderField : public BitField<CacheHolderFlag, 3, 2> {};
+  class KindField : public BitField<Kind, 5, 5> {};
   class ExtraICStateField
-      : public BitField<ExtraICState, 11, PlatformSmiTagging::kSmiValueSize -
-                                              11 + 1> {};  // NOLINT
+      : public BitField<ExtraICState, 10, PlatformSmiTagging::kSmiValueSize -
+                                              10 + 1> {};  // NOLINT
 
   // KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
   static const int kStackSlotsFirstBit = 0;
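
The repacking above fills the gap left by the removed TypeField: CacheHolderField slides down to bit 3, KindField to bit 5, and ExtraICStateField now starts at bit 10. A standalone sketch of the encode/decode arithmetic, using a stand-in for V8's BitField template (not the real one):

#include <cassert>
#include <cstdint>

// Stand-in for v8::internal::BitField<T, shift, size>; illustration only.
template <typename T, int shift, int size>
struct BitFieldSketch {
  static uint32_t mask() { return ((1u << size) - 1) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & mask()) >> shift);
  }
};

typedef BitFieldSketch<int, 0, 3> ICStateSketch;      // bits [0..2]
typedef BitFieldSketch<int, 3, 2> CacheHolderSketch;  // bits [3..4]
typedef BitFieldSketch<int, 5, 5> KindSketch;         // bits [5..9]

int main() {
  uint32_t flags = ICStateSketch::encode(1) | CacheHolderSketch::encode(2) |
                   KindSketch::encode(17);
  assert(ICStateSketch::decode(flags) == 1);
  assert(CacheHolderSketch::decode(flags) == 2);
  assert(KindSketch::decode(flags) == 17);
  return 0;
}
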
@@ -5425,15 +5330,12 @@
   static const int kMaxArguments = (1 << kArgumentsBits) - 1;
 
   // This constant should be encodable in an ARM instruction.
-  static const int kFlagsNotUsedInLookup =
-      TypeField::kMask | CacheHolderField::kMask;
+  static const int kFlagsNotUsedInLookup = CacheHolderField::kMask;
 
  private:
   friend class RelocIterator;
   friend class Deoptimizer;  // For FindCodeAgeSequence.
 
-  void ClearInlineCaches(Kind* kind);
-
   // Code aging
   byte* FindCodeAgeSequence();
   static void GetCodeAgeAndParity(Code* code, Age* age,
@@ -5775,10 +5677,6 @@
   inline void set_is_undetectable();
   inline bool is_undetectable();
 
-  // Tells whether the instance has a call-as-function handler.
-  inline void set_is_observed();
-  inline bool is_observed();
-
   // Tells whether the instance has a [[Call]] internal method.
   // This property is implemented according to ES6, section 7.2.3.
   inline void set_is_callable();
@@ -5849,6 +5747,7 @@
   int NumberOfFields();
 
   // TODO(ishell): candidate with JSObject::MigrateToMap().
+  bool InstancesNeedRewriting(Map* target);
   bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
                               int target_inobject, int target_unused,
                               int* old_number_of_fields);
@@ -5860,15 +5759,14 @@
   static void GeneralizeFieldType(Handle<Map> map, int modify_index,
                                   Representation new_representation,
                                   Handle<FieldType> new_field_type);
-  static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
-                                         PropertyKind new_kind,
-                                         PropertyAttributes new_attributes,
-                                         Representation new_representation,
-                                         Handle<FieldType> new_field_type,
-                                         StoreMode store_mode);
-  static Handle<Map> CopyGeneralizeAllRepresentations(
-      Handle<Map> map, int modify_index, StoreMode store_mode,
-      PropertyKind kind, PropertyAttributes attributes, const char* reason);
+
+  static inline Handle<Map> ReconfigureProperty(
+      Handle<Map> map, int modify_index, PropertyKind new_kind,
+      PropertyAttributes new_attributes, Representation new_representation,
+      Handle<FieldType> new_field_type, StoreMode store_mode);
+
+  static inline Handle<Map> ReconfigureElementsKind(
+      Handle<Map> map, ElementsKind new_elements_kind);
 
   static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
                                             int descriptor_number,
@@ -6017,8 +5915,6 @@
                                     FunctionKind kind);
 
 
-  static Handle<Map> CopyForObserved(Handle<Map> map);
-
   static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
                                               PropertyAttributes attrs_to_add,
                                               Handle<Symbol> transition_marker,
@@ -6037,8 +5933,8 @@
                                               PropertyAttributes attributes,
                                               StoreFromKeyed store_mode);
   static Handle<Map> TransitionToAccessorProperty(
-      Handle<Map> map, Handle<Name> name, int descriptor,
-      AccessorComponent component, Handle<Object> accessor,
+      Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
+      Handle<Object> getter, Handle<Object> setter,
       PropertyAttributes attributes);
   static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
                                                  int descriptor,
@@ -6086,30 +5982,15 @@
 
   static void EnsureDescriptorSlack(Handle<Map> map, int slack);
 
-  // Returns the found code or undefined if absent.
-  Object* FindInCodeCache(Name* name, Code::Flags flags);
-
-  // Returns the non-negative index of the code object if it is in the
-  // cache and -1 otherwise.
-  int IndexInCodeCache(Object* name, Code* code);
-
-  // Removes a code object from the code cache at the given index.
-  void RemoveFromCodeCache(Name* name, Code* code, int index);
+  Code* LookupInCodeCache(Name* name, Code::Flags code);
 
   // Computes a hash value for this map, to be used in HashTables and such.
   int Hash();
 
-  // Returns the map that this map transitions to if its elements_kind
-  // is changed to |elements_kind|, or NULL if no such map is cached yet.
-  // |safe_to_add_transitions| is set to false if adding transitions is not
-  // allowed.
-  Map* LookupElementsTransitionMap(ElementsKind elements_kind);
-
   // Returns the transitioned map for this map with the most generic
-  // elements_kind that's found in |candidates|, or null handle if no match is
+  // elements_kind that's found in |candidates|, or |nullptr| if no match is
   // found at all.
-  static Handle<Map> FindTransitionedMap(Handle<Map> map,
-                                         MapHandleList* candidates);
+  Map* FindElementsKindTransitionedMap(MapHandleList* candidates);
 
   inline bool CanTransition();
 
@@ -6222,9 +6103,9 @@
   static const int kHasNamedInterceptor = 2;
   static const int kHasIndexedInterceptor = 3;
   static const int kIsUndetectable = 4;
-  static const int kIsObserved = 5;
-  static const int kIsAccessCheckNeeded = 6;
-  static const int kIsConstructor = 7;
+  static const int kIsAccessCheckNeeded = 5;
+  static const int kIsConstructor = 6;
+  // Bit 7 is free.
 
   // Bit positions for bit field 2
   static const int kIsExtensible = 0;
@@ -6268,6 +6149,17 @@
       Handle<LayoutDescriptor> full_layout_descriptor);
 
  private:
+  // Returns the map that this (root) map transitions to if its elements_kind
+  // is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
+  Map* LookupElementsTransitionMap(ElementsKind elements_kind);
+
+  // Tries to replay property transitions starting from this (root) map using
+  // the descriptor array of |map|. This (root) map is expected to already
+  // have the proper elements kind, so elements kind transitions are not
+  // taken by this function. Returns |nullptr| if no matching transition map
+  // is found.
+  Map* TryReplayPropertyTransitions(Map* map);
+
   static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
                                 Handle<Name> name, SimpleTransitionFlag flag);
 
@@ -6304,6 +6196,19 @@
   static Handle<Map> CopyNormalized(Handle<Map> map,
                                     PropertyNormalizationMode mode);
 
+  static Handle<Map> Reconfigure(Handle<Map> map,
+                                 ElementsKind new_elements_kind,
+                                 int modify_index, PropertyKind new_kind,
+                                 PropertyAttributes new_attributes,
+                                 Representation new_representation,
+                                 Handle<FieldType> new_field_type,
+                                 StoreMode store_mode);
+
+  static Handle<Map> CopyGeneralizeAllRepresentations(
+      Handle<Map> map, ElementsKind elements_kind, int modify_index,
+      StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
+      const char* reason);
+
   // Fires when the layout of an object with a leaf map changes.
   // This includes adding transitions to the leaf map or changing
   // the descriptor array.
@@ -6489,9 +6394,10 @@
   // function from which eval was called.
   DECL_ACCESSORS(eval_from_shared, Object)
 
-  // [eval_from_instructions_offset]: the instruction offset in the code for the
-  // function from which eval was called where eval was called.
-  DECL_INT_ACCESSORS(eval_from_instructions_offset)
+  // [eval_from_position]: the source position in the code for the function
+  // from which eval was called, as a positive integer; or the code offset in
+  // the code from which eval was called, as a negative integer.
+  DECL_INT_ACCESSORS(eval_from_position)
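
A sketch of the sign convention just described (the helpers below are hypothetical and assume plain negation with no extra bias; the real translation back to a source position happens behind GetEvalPosition further down):

// Hypothetical illustration of the encoding; not part of the patch.
bool StoresSourcePosition(int eval_from_position) {
  return eval_from_position >= 0;
}
int StoredCodeOffset(int eval_from_position) {
  // Only meaningful when a code offset was stored (negative values).
  return -eval_from_position;
}
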
 
   // [shared_function_infos]: weak fixed array containing all shared
   // function infos created from this script.
@@ -6543,6 +6449,13 @@
 
   static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
 
+  // Set eval origin for stack trace formatting.
+  static void SetEvalOrigin(Handle<Script> script,
+                            Handle<SharedFunctionInfo> outer,
+                            int eval_position);
+  // Retrieve source position from where eval was called.
+  int GetEvalPosition();
+
   // Init line_ends array with source code positions of line ends.
   static void InitLineEnds(Handle<Script> script);
 
@@ -6578,10 +6491,10 @@
   static const int kLineEndsOffset = kTypeOffset + kPointerSize;
   static const int kIdOffset = kLineEndsOffset + kPointerSize;
   static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
-  static const int kEvalFrominstructionsOffsetOffset =
+  static const int kEvalFromPositionOffset =
       kEvalFromSharedOffset + kPointerSize;
   static const int kSharedFunctionInfosOffset =
-      kEvalFrominstructionsOffsetOffset + kPointerSize;
+      kEvalFromPositionOffset + kPointerSize;
   static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
   static const int kSourceUrlOffset = kFlagsOffset + kPointerSize;
   static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize;
@@ -6656,9 +6569,6 @@
 
 enum BuiltinFunctionId {
   kArrayCode,
-  kGeneratorObjectNext,
-  kGeneratorObjectReturn,
-  kGeneratorObjectThrow,
 #define DECLARE_FUNCTION_ID(ignored1, ignore2, name)    \
   k##name,
   FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
@@ -6750,6 +6660,18 @@
 
   static const int kNotFound = -1;
 
+  // Helpers for assembly code that does a backwards walk of the optimized code
+  // map.
+  static const int kOffsetToPreviousContext =
+      FixedArray::kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);
+  static const int kOffsetToPreviousCachedCode =
+      FixedArray::kHeaderSize +
+      kPointerSize * (kCachedCodeOffset - kEntryLength);
+  static const int kOffsetToPreviousLiterals =
+      FixedArray::kHeaderSize + kPointerSize * (kLiteralsOffset - kEntryLength);
+  static const int kOffsetToPreviousOsrAstId =
+      FixedArray::kHeaderSize + kPointerSize * (kOsrAstIdOffset - kEntryLength);
+
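+
The arithmetic above is easier to see with numbers plugged in: while walking backwards the cursor sits one whole entry past the entry being inspected, hence the (slot - kEntryLength) term. A standalone sketch with placeholder constants (the real values live in FixedArray and SharedFunctionInfo):

#include <cassert>

int main() {
  const int kPointerSize = 8;    // placeholder
  const int kHeaderSize = 16;    // placeholder for FixedArray::kHeaderSize
  const int kEntryLength = 4;    // slots per optimized code map entry
  const int kContextOffset = 0;  // slot index of the context in an entry
  const int kOffsetToPreviousContext =
      kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);
  assert(kOffsetToPreviousContext == -16);  // one entry behind the cursor
  return 0;
}
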
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
 
@@ -6960,6 +6882,13 @@
   // Indicates that this function is a generator.
   DECL_BOOLEAN_ACCESSORS(is_generator)
 
+  // Indicates that this function is an async function.
+  DECL_BOOLEAN_ACCESSORS(is_async)
+
+  // Indicates that this function can be suspended, either via YieldExpressions
+  // or AwaitExpressions.
+  inline bool is_resumable() const;
+
   // Indicates that this function is an arrow function.
   DECL_BOOLEAN_ACCESSORS(is_arrow)
 
@@ -7256,6 +7185,7 @@
     kIsGetterFunction,
     kIsSetterFunction,
     // byte 3
+    kIsAsyncFunction,
     kDeserialized,
     kIsDeclaration,
     kCompilerHintsCount,  // Pseudo entry
@@ -7278,7 +7208,7 @@
   ASSERT_FUNCTION_KIND_ORDER(kSetterFunction, kIsSetterFunction);
 #undef ASSERT_FUNCTION_KIND_ORDER
 
-  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 8> {};
+  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 9> {};
 
   class DeoptCountBits : public BitField<int, 0, 4> {};
   class OptReenableTriesBits : public BitField<int, 4, 18> {};
@@ -7307,6 +7237,8 @@
   static const int kStrictModeBit =
       kStrictModeFunction + kCompilerHintsSmiTagSize;
   static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
+  static const int kHasDuplicateParametersBit =
+      kHasDuplicateParameters + kCompilerHintsSmiTagSize;
 
   static const int kClassConstructorBits =
       FunctionKind::kClassConstructor
@@ -7317,6 +7249,8 @@
   // Allows to use byte-width instructions.
   static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
   static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
+  static const int kHasDuplicateParametersBitWithinByte =
+      kHasDuplicateParametersBit % kBitsPerByte;
 
   static const int kClassConstructorBitsWithinByte =
       FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
@@ -7336,6 +7270,8 @@
   static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
   static const int kNativeByteOffset = BYTE_OFFSET(kNative);
   static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
+  static const int kHasDuplicateParametersByteOffset =
+      BYTE_OFFSET(kHasDuplicateParameters);
 #undef BYTE_OFFSET
 
  private:
@@ -7375,6 +7311,10 @@
   // [input]: The most recent input value.
   DECL_ACCESSORS(input, Object)
 
+  // [resume_mode]: The most recent resume mode.
+  enum ResumeMode { kNext, kReturn, kThrow };
+  DECL_INT_ACCESSORS(resume_mode)
+
   // [continuation]: Offset into code of continuation.
   //
   // A positive offset indicates a suspended generator.  The special
@@ -7392,25 +7332,22 @@
   DECLARE_CAST(JSGeneratorObject)
 
   // Dispatched behavior.
-  DECLARE_PRINTER(JSGeneratorObject)
   DECLARE_VERIFIER(JSGeneratorObject)
 
   // Magic sentinel values for the continuation.
-  static const int kGeneratorExecuting = -1;
-  static const int kGeneratorClosed = 0;
+  static const int kGeneratorExecuting = -2;
+  static const int kGeneratorClosed = -1;
 
   // Layout description.
   static const int kFunctionOffset = JSObject::kHeaderSize;
   static const int kContextOffset = kFunctionOffset + kPointerSize;
   static const int kReceiverOffset = kContextOffset + kPointerSize;
   static const int kInputOffset = kReceiverOffset + kPointerSize;
-  static const int kContinuationOffset = kInputOffset + kPointerSize;
+  static const int kResumeModeOffset = kInputOffset + kPointerSize;
+  static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
   static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
   static const int kSize = kOperandStackOffset + kPointerSize;
 
-  // Resume mode, for use by runtime functions.
-  enum ResumeMode { NEXT, RETURN, THROW };
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
 };
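
Shifting the sentinels from -1/0 to -2/-1 presumably frees the value 0 to act as an ordinary suspend offset. A standalone sketch of how the continuation field partitions generator state, with the constants inlined from the definitions above:

enum class GeneratorState { kExecuting, kClosed, kSuspended };

GeneratorState Classify(int continuation) {
  if (continuation == -2) return GeneratorState::kExecuting;  // kGeneratorExecuting
  if (continuation == -1) return GeneratorState::kClosed;     // kGeneratorClosed
  return GeneratorState::kSuspended;  // an offset into the code
}
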
@@ -7444,12 +7381,6 @@
 // JSBoundFunction describes a bound function exotic object.
 class JSBoundFunction : public JSObject {
  public:
-  // [length]: The bound function "length" property.
-  DECL_ACCESSORS(length, Object)
-
-  // [name]: The bound function "name" property.
-  DECL_ACCESSORS(name, Object)
-
   // [bound_target_function]: The wrapped function object.
   DECL_ACCESSORS(bound_target_function, JSReceiver)
 
@@ -7461,6 +7392,8 @@
   // arguments to any call to the wrapped function.
   DECL_ACCESSORS(bound_arguments, FixedArray)
 
+  static MaybeHandle<String> GetName(Isolate* isolate,
+                                     Handle<JSBoundFunction> function);
   static MaybeHandle<Context> GetFunctionRealm(
       Handle<JSBoundFunction> function);
 
@@ -7474,20 +7407,11 @@
   // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
   static Handle<String> ToString(Handle<JSBoundFunction> function);
 
-  static MaybeHandle<String> GetName(Isolate* isolate,
-                                     Handle<JSBoundFunction> function);
-
   // Layout description.
   static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
   static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
   static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
-  static const int kLengthOffset = kBoundArgumentsOffset + kPointerSize;
-  static const int kNameOffset = kLengthOffset + kPointerSize;
-  static const int kSize = kNameOffset + kPointerSize;
-
-  // Indices of in-object properties.
-  static const int kLengthIndex = 0;
-  static const int kNameIndex = 1;
+  static const int kSize = kBoundArgumentsOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSBoundFunction);
@@ -7510,6 +7434,9 @@
   inline JSObject* global_proxy();
   inline Context* native_context();
 
+  static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
+  static MaybeHandle<Smi> GetLength(Isolate* isolate,
+                                    Handle<JSFunction> function);
   static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
 
   // [code]: The generated code object for this function.  Executed
@@ -7531,13 +7458,14 @@
   // Tells whether or not this function has been optimized.
   inline bool IsOptimized();
 
-  // Mark this function for lazy recompilation. The function will be
-  // recompiled the next time it is executed.
+  // Mark this function for lazy recompilation. The function will be recompiled
+  // the next time it is executed.
+  void MarkForBaseline();
   void MarkForOptimization();
   void AttemptConcurrentOptimization();
 
-  // Tells whether or not the function is already marked for lazy
-  // recompilation.
+  // Tells whether or not the function is already marked for lazy recompilation.
+  inline bool IsMarkedForBaseline();
   inline bool IsMarkedForOptimization();
   inline bool IsMarkedForConcurrentOptimization();
 
@@ -7965,7 +7893,6 @@
   DECL_ACCESSORS(source, Object)
 
   static MaybeHandle<JSRegExp> New(Handle<String> source, Flags flags);
-  static MaybeHandle<JSRegExp> New(Handle<String> source, Handle<String> flags);
   static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
 
   static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
@@ -8140,57 +8067,6 @@
 };
 
 
-class CodeCache: public Struct {
- public:
-  DECL_ACCESSORS(default_cache, FixedArray)
-  DECL_ACCESSORS(normal_type_cache, Object)
-
-  // Add the code object to the cache.
-  static void Update(
-      Handle<CodeCache> cache, Handle<Name> name, Handle<Code> code);
-
-  // Lookup code object in the cache. Returns code object if found and undefined
-  // if not.
-  Object* Lookup(Name* name, Code::Flags flags);
-
-  // Get the internal index of a code object in the cache. Returns -1 if the
-  // code object is not in that cache. This index can be used to later call
-  // RemoveByIndex. The cache cannot be modified between a call to GetIndex and
-  // RemoveByIndex.
-  int GetIndex(Object* name, Code* code);
-
-  // Remove an object from the cache with the provided internal index.
-  void RemoveByIndex(Object* name, Code* code, int index);
-
-  DECLARE_CAST(CodeCache)
-
-  // Dispatched behavior.
-  DECLARE_PRINTER(CodeCache)
-  DECLARE_VERIFIER(CodeCache)
-
-  static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
-  static const int kNormalTypeCacheOffset =
-      kDefaultCacheOffset + kPointerSize;
-  static const int kSize = kNormalTypeCacheOffset + kPointerSize;
-
- private:
-  static void UpdateDefaultCache(
-      Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code);
-  static void UpdateNormalTypeCache(
-      Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code);
-  Object* LookupDefaultCache(Name* name, Code::Flags flags);
-  Object* LookupNormalTypeCache(Name* name, Code::Flags flags);
-
-  // Code cache layout of the default cache. Elements are alternating name and
-  // code objects for non normal load/store/call IC's.
-  static const int kCodeCacheEntrySize = 2;
-  static const int kCodeCacheEntryNameOffset = 0;
-  static const int kCodeCacheEntryCodeOffset = 1;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCache);
-};
-
-
 class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
  public:
   static inline bool IsMatch(HashTableKey* key, Object* value) {
@@ -8208,7 +8084,11 @@
   static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
 
   static const int kPrefixSize = 0;
-  static const int kEntrySize = 2;
+  // Both the key (name + flags) and the value (code object) can be derived
+  // from the fixed array that stores both the name and the code.
+  // TODO(verwaest): Don't allocate a fixed array but inline name and code.
+  // Rewrite IsMatch to get table + index as input rather than just the raw key.
+  static const int kEntrySize = 1;
 };
 
 
@@ -8216,73 +8096,23 @@
                                            CodeCacheHashTableShape,
                                            HashTableKey*> {
  public:
-  Object* Lookup(Name* name, Code::Flags flags);
   static Handle<CodeCacheHashTable> Put(
       Handle<CodeCacheHashTable> table,
       Handle<Name> name,
       Handle<Code> code);
 
-  int GetIndex(Name* name, Code::Flags flags);
-  void RemoveByIndex(int index);
+  Code* Lookup(Name* name, Code::Flags flags);
 
   DECLARE_CAST(CodeCacheHashTable)
 
   // Initial size of the fixed array backing the hash table.
-  static const int kInitialSize = 64;
+  static const int kInitialSize = 16;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
 };
 
 
-class PolymorphicCodeCache: public Struct {
- public:
-  DECL_ACCESSORS(cache, Object)
-
-  static void Update(Handle<PolymorphicCodeCache> cache,
-                     MapHandleList* maps,
-                     Code::Flags flags,
-                     Handle<Code> code);
-
-
-  // Returns an undefined value if the entry is not found.
-  Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
-
-  DECLARE_CAST(PolymorphicCodeCache)
-
-  // Dispatched behavior.
-  DECLARE_PRINTER(PolymorphicCodeCache)
-  DECLARE_VERIFIER(PolymorphicCodeCache)
-
-  static const int kCacheOffset = HeapObject::kHeaderSize;
-  static const int kSize = kCacheOffset + kPointerSize;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCache);
-};
-
-
-class PolymorphicCodeCacheHashTable
-    : public HashTable<PolymorphicCodeCacheHashTable,
-                       CodeCacheHashTableShape,
-                       HashTableKey*> {
- public:
-  Object* Lookup(MapHandleList* maps, int code_kind);
-
-  static Handle<PolymorphicCodeCacheHashTable> Put(
-      Handle<PolymorphicCodeCacheHashTable> hash_table,
-      MapHandleList* maps,
-      int code_kind,
-      Handle<Code> code);
-
-  DECLARE_CAST(PolymorphicCodeCacheHashTable)
-
-  static const int kInitialSize = 64;
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCacheHashTable);
-};
-
-
 class TypeFeedbackInfo: public Struct {
  public:
   inline int ic_total_count();
@@ -8852,26 +8682,26 @@
   class FlatContent {
    public:
     // Returns true if the string is flat and this structure contains content.
-    bool IsFlat() { return state_ != NON_FLAT; }
+    bool IsFlat() const { return state_ != NON_FLAT; }
     // Returns true if the structure contains one-byte content.
-    bool IsOneByte() { return state_ == ONE_BYTE; }
+    bool IsOneByte() const { return state_ == ONE_BYTE; }
     // Returns true if the structure contains two-byte content.
-    bool IsTwoByte() { return state_ == TWO_BYTE; }
+    bool IsTwoByte() const { return state_ == TWO_BYTE; }
 
     // Return the one byte content of the string. Only use if IsOneByte()
     // returns true.
-    Vector<const uint8_t> ToOneByteVector() {
+    Vector<const uint8_t> ToOneByteVector() const {
       DCHECK_EQ(ONE_BYTE, state_);
       return Vector<const uint8_t>(onebyte_start, length_);
     }
     // Return the two-byte content of the string. Only use if IsTwoByte()
     // returns true.
-    Vector<const uc16> ToUC16Vector() {
+    Vector<const uc16> ToUC16Vector() const {
       DCHECK_EQ(TWO_BYTE, state_);
       return Vector<const uc16>(twobyte_start, length_);
     }
 
-    uc16 Get(int i) {
+    uc16 Get(int i) const {
       DCHECK(i < length_);
       DCHECK(state_ != NON_FLAT);
       if (state_ == ONE_BYTE) return onebyte_start[i];
@@ -9006,15 +8836,6 @@
       RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
       int* length_output = 0);
 
-  // Return a 16 bit Unicode representation of the string.
-  // The string should be nearly flat, otherwise the performance of
-  // of this method may be very bad.  Setting robustness_flag to
-  // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust  This means it
-  // handles unexpected data without causing assert failures and it does not
-  // do any heap allocations.  This is useful when printing stack traces.
-  base::SmartArrayPointer<uc16> ToWideCString(
-      RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
-
   bool ComputeArrayIndex(uint32_t* index);
 
   // Externalization.
@@ -9589,6 +9410,10 @@
 // The Oddball describes objects null, undefined, true, and false.
 class Oddball: public HeapObject {
  public:
+  // [to_number_raw]: Cached raw to_number computed at startup.
+  inline double to_number_raw() const;
+  inline void set_to_number_raw(double value);
+
   // [to_string]: Cached to_string computed at startup.
   DECL_ACCESSORS(to_string, String)
 
@@ -9618,7 +9443,8 @@
                          bool to_boolean, const char* type_of, byte kind);
 
   // Layout description.
-  static const int kToStringOffset = HeapObject::kHeaderSize;
+  static const int kToNumberRawOffset = HeapObject::kHeaderSize;
+  static const int kToStringOffset = kToNumberRawOffset + kDoubleSize;
   static const int kToNumberOffset = kToStringOffset + kPointerSize;
   static const int kToBooleanOffset = kToNumberOffset + kPointerSize;
   static const int kTypeOfOffset = kToBooleanOffset + kPointerSize;
@@ -9636,10 +9462,12 @@
   static const byte kOther = 7;
   static const byte kException = 8;
   static const byte kOptimizedOut = 9;
+  static const byte kStaleRegister = 10;
 
   typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
+  STATIC_ASSERT(kToNumberRawOffset == HeapNumber::kValueOffset);
   STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
   STATIC_ASSERT(kNull == Internals::kNullOddballKind);
   STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind);
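
The first STATIC_ASSERT pins down the design point: the raw double sits at the same offset as a HeapNumber's payload, so generated code can load an oddball's cached number with exactly the load it already uses for heap numbers. A standalone sketch of that aliasing, with placeholder layouts:

#include <cassert>
#include <cstddef>

// Placeholder layouts: an 8-byte header followed by an 8-byte payload.
struct FakeHeapNumber { char header[8]; double value; };
struct FakeOddball { char header[8]; double to_number_raw; };

int main() {
  assert(offsetof(FakeOddball, to_number_raw) ==
         offsetof(FakeHeapNumber, value));
  return 0;
}
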
@@ -10305,9 +10133,6 @@
   inline bool AllowsSetLength();
 
   static void SetLength(Handle<JSArray> array, uint32_t length);
-  // Same as above but will also queue splice records if |array| is observed.
-  static MaybeHandle<Object> ObservableSetLength(Handle<JSArray> array,
-                                                 uint32_t length);
 
   // Set the content of the array to the content of storage.
   static inline void SetContent(Handle<JSArray> array,
@@ -10396,10 +10221,18 @@
   DECL_ACCESSORS(name, Object)
   DECL_INT_ACCESSORS(flag)
   DECL_ACCESSORS(expected_receiver_type, Object)
+  // This directly points at a foreign C function to be used from the runtime.
   DECL_ACCESSORS(getter, Object)
   DECL_ACCESSORS(setter, Object)
+  // This either points at the same function as above, or at a trampoline
+  // when running with the simulator. Use this entry from generated code.
+  DECL_ACCESSORS(js_getter, Object)
   DECL_ACCESSORS(data, Object)
 
+  static Address redirect(Isolate* isolate, Address address,
+                          AccessorComponent component);
+  Address redirected_getter() const;
+
   // Dispatched behavior.
   DECLARE_PRINTER(AccessorInfo)
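
A sketch of the redirection idea (hypothetical helper, not V8's code): generated code always goes through js_getter, which aliases the C getter on real hardware and points at a marshalling trampoline under the simulator.

typedef unsigned char* Address;  // stand-in for v8::internal::Address

Address EffectiveJsGetter(Address c_getter, Address simulator_trampoline,
                          bool running_on_simulator) {
  return running_on_simulator ? simulator_trampoline : c_getter;
}
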
 
@@ -10438,9 +10271,10 @@
   static const int kNameOffset = HeapObject::kHeaderSize;
   static const int kFlagOffset = kNameOffset + kPointerSize;
   static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
-  static const int kGetterOffset = kExpectedReceiverTypeOffset + kPointerSize;
-  static const int kSetterOffset = kGetterOffset + kPointerSize;
-  static const int kDataOffset = kSetterOffset + kPointerSize;
+  static const int kSetterOffset = kExpectedReceiverTypeOffset + kPointerSize;
+  static const int kGetterOffset = kSetterOffset + kPointerSize;
+  static const int kJsGetterOffset = kGetterOffset + kPointerSize;
+  static const int kDataOffset = kJsGetterOffset + kPointerSize;
   static const int kSize = kDataOffset + kPointerSize;
 
 
diff --git a/src/optimizing-compile-dispatcher.cc b/src/optimizing-compile-dispatcher.cc
index ed20224..7077339 100644
--- a/src/optimizing-compile-dispatcher.cc
+++ b/src/optimizing-compile-dispatcher.cc
@@ -15,15 +15,12 @@
 
 namespace {
 
-void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
-                                bool restore_function_code) {
-  // The recompile job is allocated in the CompilationInfo's zone.
-  CompilationInfo* info = job->info();
+void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
   if (restore_function_code) {
-    Handle<JSFunction> function = info->closure();
+    Handle<JSFunction> function = job->info()->closure();
     function->ReplaceCode(function->shared()->code());
   }
-  delete info;
+  delete job;
 }
 
 }  // namespace
@@ -85,35 +82,29 @@
   DeleteArray(input_queue_);
 }
 
-
-OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
-    bool check_if_flushing) {
+CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
   base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
   if (input_queue_length_ == 0) return NULL;
-  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
+  CompilationJob* job = input_queue_[InputQueueIndex(0)];
   DCHECK_NOT_NULL(job);
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
   if (check_if_flushing) {
     if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
-      if (!job->info()->is_osr()) {
-        AllowHandleDereference allow_handle_dereference;
-        DisposeOptimizedCompileJob(job, true);
-      }
+      AllowHandleDereference allow_handle_dereference;
+      DisposeCompilationJob(job, true);
       return NULL;
     }
   }
   return job;
 }
 
-
-void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
+void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
   if (!job) return;
 
   // The function may have already been optimized by OSR.  Simply continue.
-  OptimizedCompileJob::Status status = job->OptimizeGraph();
-  USE(status);  // Prevent an unused-variable error in release mode.
-  DCHECK(status != OptimizedCompileJob::FAILED);
+  CompilationJob::Status status = job->OptimizeGraph();
+  USE(status);  // Prevent an unused-variable error.
 
   // Use a mutex to make sure that functions marked for install
@@ -126,7 +117,7 @@
 
 void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
   for (;;) {
-    OptimizedCompileJob* job = NULL;
+    CompilationJob* job = NULL;
     {
       base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
       if (output_queue_.empty()) return;
@@ -134,10 +125,7 @@
       output_queue_.pop();
     }
 
-    // OSR jobs are dealt with separately.
-    if (!job->info()->is_osr()) {
-      DisposeOptimizedCompileJob(job, restore_function_code);
-    }
+    DisposeCompilationJob(job, restore_function_code);
   }
 }
 
@@ -181,7 +169,7 @@
   HandleScope handle_scope(isolate_);
 
   for (;;) {
-    OptimizedCompileJob* job = NULL;
+    CompilationJob* job = NULL;
     {
       base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
       if (output_queue_.empty()) return;
@@ -196,16 +184,14 @@
         function->ShortPrint();
         PrintF(" as it has already been optimized.\n");
       }
-      DisposeOptimizedCompileJob(job, false);
+      DisposeCompilationJob(job, false);
     } else {
-      Compiler::FinalizeOptimizedCompileJob(job);
+      Compiler::FinalizeCompilationJob(job);
     }
   }
 }
 
-
-void OptimizingCompileDispatcher::QueueForOptimization(
-    OptimizedCompileJob* job) {
+void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
   DCHECK(IsQueueAvailable());
   {
     // Add job to the back of the input queue.
diff --git a/src/optimizing-compile-dispatcher.h b/src/optimizing-compile-dispatcher.h
index e14e8aa..2b20a53 100644
--- a/src/optimizing-compile-dispatcher.h
+++ b/src/optimizing-compile-dispatcher.h
@@ -17,8 +17,7 @@
 namespace v8 {
 namespace internal {
 
-class HOptimizedGraphBuilder;
-class OptimizedCompileJob;
+class CompilationJob;
 class SharedFunctionInfo;
 
 class OptimizingCompileDispatcher {
@@ -32,7 +31,7 @@
         ref_count_(0),
         recompilation_delay_(FLAG_concurrent_recompilation_delay) {
     base::NoBarrier_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
-    input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
+    input_queue_ = NewArray<CompilationJob*>(input_queue_capacity_);
   }
 
   ~OptimizingCompileDispatcher();
@@ -40,7 +39,7 @@
   void Run();
   void Stop();
   void Flush();
-  void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
+  void QueueForOptimization(CompilationJob* job);
   void Unblock();
   void InstallOptimizedFunctions();
 
@@ -57,8 +56,8 @@
   enum ModeFlag { COMPILE, FLUSH };
 
   void FlushOutputQueue(bool restore_function_code);
-  void CompileNext(OptimizedCompileJob* job);
-  OptimizedCompileJob* NextInput(bool check_if_flushing = false);
+  void CompileNext(CompilationJob* job);
+  CompilationJob* NextInput(bool check_if_flushing = false);
 
   inline int InputQueueIndex(int i) {
     int result = (i + input_queue_shift_) % input_queue_capacity_;
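
The input queue is a fixed-capacity ring buffer, and InputQueueIndex maps a logical position to a physical slot. A standalone sketch of the wrap-around arithmetic:

#include <cassert>

int main() {
  const int capacity = 8;
  const int shift = 6;  // physical index of the logical head
  // Logical position 3 wraps past the end of the backing array.
  assert((3 + shift) % capacity == 1);
  return 0;
}
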
@@ -70,14 +69,14 @@
   Isolate* isolate_;
 
   // Circular queue of incoming recompilation tasks (including OSR).
-  OptimizedCompileJob** input_queue_;
+  CompilationJob** input_queue_;
   int input_queue_capacity_;
   int input_queue_length_;
   int input_queue_shift_;
   base::Mutex input_queue_mutex_;
 
   // Queue of recompilation tasks ready to be installed (excluding OSR).
-  std::queue<OptimizedCompileJob*> output_queue_;
+  std::queue<CompilationJob*> output_queue_;
   // Used for job based recompilation which has multiple producers on
   // different threads.
   base::Mutex output_queue_mutex_;
diff --git a/src/parsing/expression-classifier.h b/src/parsing/expression-classifier.h
index 71fa3d3..3f70ed8 100644
--- a/src/parsing/expression-classifier.h
+++ b/src/parsing/expression-classifier.h
@@ -39,17 +39,23 @@
     ArrowFormalParametersProduction = 1 << 6,
     LetPatternProduction = 1 << 7,
     CoverInitializedNameProduction = 1 << 8,
+    TailCallExpressionProduction = 1 << 9,
+    AsyncArrowFormalParametersProduction = 1 << 10,
+    AsyncBindingPatternProduction = 1 << 11,
 
     ExpressionProductions =
-        (ExpressionProduction | FormalParameterInitializerProduction),
-    PatternProductions = (BindingPatternProduction |
-                          AssignmentPatternProduction | LetPatternProduction),
+        (ExpressionProduction | FormalParameterInitializerProduction |
+         TailCallExpressionProduction),
+    PatternProductions =
+        (BindingPatternProduction | AssignmentPatternProduction |
+         LetPatternProduction | AsyncBindingPatternProduction),
     FormalParametersProductions = (DistinctFormalParametersProduction |
                                    StrictModeFormalParametersProduction),
     StandardProductions = ExpressionProductions | PatternProductions,
     AllProductions =
         (StandardProductions | FormalParametersProductions |
-         ArrowFormalParametersProduction | CoverInitializedNameProduction)
+         ArrowFormalParametersProduction | CoverInitializedNameProduction |
+         AsyncArrowFormalParametersProduction | AsyncBindingPatternProduction)
   };
 
   enum FunctionProperties { NonSimpleParameter = 1 << 0 };
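
A standalone sketch of the recording scheme these productions feed: each production owns one bit of invalid_productions_, recording an error sets the bit, and the is_valid_* accessors test it (only AsyncBindingPatternProduction's value below is taken from the enum above):

#include <cassert>

int main() {
  const unsigned AsyncBindingPatternProduction = 1 << 11;
  unsigned invalid_productions = 0;

  // RecordAsyncBindingPatternError() reduces to setting the bit ...
  invalid_productions |= AsyncBindingPatternProduction;

  // ... and is_valid_async_binding_pattern() to testing it.
  bool is_valid = (invalid_productions & AsyncBindingPatternProduction) == 0;
  assert(!is_valid);
  return 0;
}
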
@@ -110,6 +116,14 @@
 
   bool is_valid_let_pattern() const { return is_valid(LetPatternProduction); }
 
+  bool is_valid_async_arrow_formal_parameters() const {
+    return is_valid(AsyncArrowFormalParametersProduction);
+  }
+
+  bool is_valid_async_binding_pattern() const {
+    return is_valid(AsyncBindingPatternProduction);
+  }
+
   const Error& expression_error() const { return expression_error_; }
 
   const Error& formal_parameter_initializer_error() const {
@@ -143,6 +157,20 @@
     return cover_initialized_name_error_;
   }
 
+  bool has_tail_call_expression() const {
+    return !is_valid(TailCallExpressionProduction);
+  }
+  const Error& tail_call_expression_error() const {
+    return tail_call_expression_error_;
+  }
+  const Error& async_arrow_formal_parameters_error() const {
+    return async_arrow_formal_parameters_error_;
+  }
+
+  const Error& async_binding_pattern_error() const {
+    return async_binding_pattern_error_;
+  }
+
   bool is_simple_parameter_list() const {
     return !(function_properties_ & NonSimpleParameter);
   }
@@ -219,6 +247,26 @@
     arrow_formal_parameters_error_.arg = arg;
   }
 
+  void RecordAsyncArrowFormalParametersError(const Scanner::Location& loc,
+                                             MessageTemplate::Template message,
+                                             const char* arg = nullptr) {
+    if (!is_valid_async_arrow_formal_parameters()) return;
+    invalid_productions_ |= AsyncArrowFormalParametersProduction;
+    async_arrow_formal_parameters_error_.location = loc;
+    async_arrow_formal_parameters_error_.message = message;
+    async_arrow_formal_parameters_error_.arg = arg;
+  }
+
+  void RecordAsyncBindingPatternError(const Scanner::Location& loc,
+                                      MessageTemplate::Template message,
+                                      const char* arg = nullptr) {
+    if (!is_valid_async_binding_pattern()) return;
+    invalid_productions_ |= AsyncBindingPatternProduction;
+    async_binding_pattern_error_.location = loc;
+    async_binding_pattern_error_.message = message;
+    async_binding_pattern_error_.arg = arg;
+  }
+
   void RecordDuplicateFormalParameterError(const Scanner::Location& loc) {
     if (!is_valid_formal_parameter_list_without_duplicates()) return;
     invalid_productions_ |= DistinctFormalParametersProduction;
@@ -260,6 +308,16 @@
     cover_initialized_name_error_.arg = arg;
   }
 
+  void RecordTailCallExpressionError(const Scanner::Location& loc,
+                                     MessageTemplate::Template message,
+                                     const char* arg = nullptr) {
+    if (has_tail_call_expression()) return;
+    invalid_productions_ |= TailCallExpressionProduction;
+    tail_call_expression_error_.location = loc;
+    tail_call_expression_error_.message = message;
+    tail_call_expression_error_.arg = arg;
+  }
+
   void ForgiveCoverInitializedNameError() {
     invalid_productions_ &= ~CoverInitializedNameProduction;
     cover_initialized_name_error_ = Error();
@@ -305,6 +363,13 @@
         let_pattern_error_ = inner->let_pattern_error_;
       if (errors & CoverInitializedNameProduction)
         cover_initialized_name_error_ = inner->cover_initialized_name_error_;
+      if (errors & TailCallExpressionProduction)
+        tail_call_expression_error_ = inner->tail_call_expression_error_;
+      if (errors & AsyncArrowFormalParametersProduction)
+        async_arrow_formal_parameters_error_ =
+            inner->async_arrow_formal_parameters_error_;
+      if (errors & AsyncBindingPatternProduction)
+        async_binding_pattern_error_ = inner->async_binding_pattern_error_;
     }
 
     // As an exception to the above, the result continues to be a valid arrow
@@ -340,6 +405,8 @@
   int non_pattern_begin_;
   unsigned invalid_productions_;
   unsigned function_properties_;
+  // TODO(ishell): consider using Zone[Hash]Map<TargetProduction, Error>
+  // here to consume less stack space during parsing.
   Error expression_error_;
   Error formal_parameter_initializer_error_;
   Error binding_pattern_error_;
@@ -349,6 +416,9 @@
   Error strict_mode_formal_parameter_error_;
   Error let_pattern_error_;
   Error cover_initialized_name_error_;
+  Error tail_call_expression_error_;
+  Error async_arrow_formal_parameters_error_;
+  Error async_binding_pattern_error_;
   DuplicateFinder* duplicate_finder_;
 };
 
diff --git a/src/parsing/parameter-initializer-rewriter.cc b/src/parsing/parameter-initializer-rewriter.cc
index 3e3587b..6362c63 100644
--- a/src/parsing/parameter-initializer-rewriter.cc
+++ b/src/parsing/parameter-initializer-rewriter.cc
@@ -4,6 +4,10 @@
 
 #include "src/parsing/parameter-initializer-rewriter.h"
 
+#include <algorithm>
+#include <utility>
+#include <vector>
+
 #include "src/ast/ast.h"
 #include "src/ast/ast-expression-visitor.h"
 #include "src/ast/scopes.h"
@@ -21,6 +25,7 @@
       : AstExpressionVisitor(stack_limit, initializer),
         old_scope_(old_scope),
         new_scope_(new_scope) {}
+  ~Rewriter();
 
  private:
   void VisitExpression(Expression* expr) override {}
@@ -29,10 +34,32 @@
   void VisitClassLiteral(ClassLiteral* expr) override;
   void VisitVariableProxy(VariableProxy* expr) override;
 
+  void VisitBlock(Block* stmt) override;
+  void VisitTryCatchStatement(TryCatchStatement* stmt) override;
+  void VisitWithStatement(WithStatement* stmt) override;
+
   Scope* old_scope_;
   Scope* new_scope_;
+  std::vector<std::pair<Variable*, int>> temps_;
 };
 
+struct LessThanSecond {
+  bool operator()(const std::pair<Variable*, int>& left,
+                  const std::pair<Variable*, int>& right) {
+    return left.second < right.second;
+  }
+};
+
+Rewriter::~Rewriter() {
+  if (!temps_.empty()) {
+    // Ensure that we add temporaries in the order they appeared in old_scope_.
+    std::sort(temps_.begin(), temps_.end(), LessThanSecond());
+    for (auto var_and_index : temps_) {
+      var_and_index.first->set_scope(new_scope_);
+      new_scope_->AddTemporary(var_and_index.first);
+    }
+  }
+}
 
 void Rewriter::VisitFunctionLiteral(FunctionLiteral* function_literal) {
   function_literal->scope()->ReplaceOuterScope(new_scope_);
@@ -63,9 +90,13 @@
   if (proxy->is_resolved()) {
     Variable* var = proxy->var();
     if (var->mode() != TEMPORARY) return;
-    if (old_scope_->RemoveTemporary(var)) {
-      var->set_scope(new_scope_);
-      new_scope_->AddTemporary(var);
+    // For rewriting inside the same ClosureScope (e.g., putting default
+    // parameter values in their own inner scope in certain cases), refrain
+    // from invalidly moving temporaries to a block scope.
+    if (var->scope()->ClosureScope() == new_scope_->ClosureScope()) return;
+    int index = old_scope_->RemoveTemporary(var);
+    if (index >= 0) {
+      temps_.push_back(std::make_pair(var, index));
     }
   } else if (old_scope_->RemoveUnresolved(proxy)) {
     new_scope_->AddUnresolved(proxy);
@@ -73,6 +104,26 @@
 }
 
 
+void Rewriter::VisitBlock(Block* stmt) {
+  if (stmt->scope() != nullptr)
+    stmt->scope()->ReplaceOuterScope(new_scope_);
+  else
+    VisitStatements(stmt->statements());
+}
+
+
+void Rewriter::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  Visit(stmt->try_block());
+  stmt->scope()->ReplaceOuterScope(new_scope_);
+}
+
+
+void Rewriter::VisitWithStatement(WithStatement* stmt) {
+  Visit(stmt->expression());
+  stmt->scope()->ReplaceOuterScope(new_scope_);
+}
+
+
 }  // anonymous namespace
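
The destructor above leans on a detail of the changed RemoveTemporary contract: it now returns the variable's original index (negative if absent), and sorting the collected (variable, index) pairs by index restores the old scope's ordering before the temporaries are re-added. A standalone sketch of that trick:

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

bool LessThanSecond(const std::pair<char, int>& left,
                    const std::pair<char, int>& right) {
  return left.second < right.second;
}

int main() {
  // Collected in visitation order, tagged with each item's original index.
  std::vector<std::pair<char, int> > temps;
  temps.push_back(std::make_pair('c', 2));
  temps.push_back(std::make_pair('a', 0));
  temps.push_back(std::make_pair('b', 1));
  std::sort(temps.begin(), temps.end(), LessThanSecond);
  assert(temps[0].first == 'a');  // original ordering restored
  return 0;
}
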
 
 
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index dde6b1d..6086f7a 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -29,6 +29,86 @@
   kDisallowLabelledFunctionStatement,
 };
 
+enum class FunctionBody { Normal, SingleExpression };
+
+enum class ParseFunctionFlags {
+  kIsNormal = 0,
+  kIsGenerator = 1,
+  kIsAsync = 2,
+  kIsDefault = 4
+};
+
+static inline ParseFunctionFlags operator|(ParseFunctionFlags lhs,
+                                           ParseFunctionFlags rhs) {
+  typedef unsigned char T;
+  return static_cast<ParseFunctionFlags>(static_cast<T>(lhs) |
+                                         static_cast<T>(rhs));
+}
+
+static inline ParseFunctionFlags& operator|=(ParseFunctionFlags& lhs,
+                                             const ParseFunctionFlags& rhs) {
+  lhs = lhs | rhs;
+  return lhs;
+}
+
+static inline bool operator&(ParseFunctionFlags bitfield,
+                             ParseFunctionFlags mask) {
+  typedef unsigned char T;
+  return static_cast<T>(bitfield) & static_cast<T>(mask);
+}
+
+enum class MethodKind {
+  Normal = 0,
+  Static = 1 << 0,
+  Generator = 1 << 1,
+  StaticGenerator = Static | Generator,
+  Async = 1 << 2,
+  StaticAsync = Static | Async,
+
+  /* Any non-ordinary method kinds */
+  SpecialMask = Generator | Async
+};
+
+inline bool IsValidMethodKind(MethodKind kind) {
+  return kind == MethodKind::Normal || kind == MethodKind::Static ||
+         kind == MethodKind::Generator || kind == MethodKind::StaticGenerator ||
+         kind == MethodKind::Async || kind == MethodKind::StaticAsync;
+}
+
+static inline MethodKind operator|(MethodKind lhs, MethodKind rhs) {
+  typedef unsigned char T;
+  return static_cast<MethodKind>(static_cast<T>(lhs) | static_cast<T>(rhs));
+}
+
+static inline MethodKind& operator|=(MethodKind& lhs, const MethodKind& rhs) {
+  lhs = lhs | rhs;
+  DCHECK(IsValidMethodKind(lhs));
+  return lhs;
+}
+
+static inline bool operator&(MethodKind bitfield, MethodKind mask) {
+  typedef unsigned char T;
+  return static_cast<T>(bitfield) & static_cast<T>(mask);
+}
+
+inline bool IsNormalMethod(MethodKind kind) {
+  return kind == MethodKind::Normal;
+}
+
+inline bool IsSpecialMethod(MethodKind kind) {
+  return kind & MethodKind::SpecialMask;
+}
+
+inline bool IsStaticMethod(MethodKind kind) {
+  return kind & MethodKind::Static;
+}
+
+inline bool IsGeneratorMethod(MethodKind kind) {
+  return kind & MethodKind::Generator;
+}
+
+inline bool IsAsyncMethod(MethodKind kind) { return kind & MethodKind::Async; }
+
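+
A short usage sketch of the enum-class flag plumbing above, assuming those definitions are in scope (enum classes get no implicit bitwise operators, which is exactly what the overloads supply):

void FlagsUsageSketch() {
  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
  flags |= ParseFunctionFlags::kIsGenerator | ParseFunctionFlags::kIsDefault;
  if (flags & ParseFunctionFlags::kIsGenerator) {
    // ... take the generator parsing path ...
  }

  MethodKind kind = MethodKind::Static;
  kind |= MethodKind::Async;  // yields MethodKind::StaticAsync
  bool is_static_async = IsStaticMethod(kind) && IsAsyncMethod(kind);
  (void)is_static_async;
}
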
 struct FormalParametersBase {
   explicit FormalParametersBase(Scope* scope) : scope(scope) {}
   Scope* scope;
@@ -98,7 +178,6 @@
              v8::Extension* extension, AstValueFactory* ast_value_factory,
              ParserRecorder* log, typename Traits::Type::Parser this_object)
       : Traits(this_object),
-        parenthesized_function_(false),
         scope_(NULL),
         function_state_(NULL),
         extension_(extension),
@@ -106,6 +185,7 @@
         ast_value_factory_(ast_value_factory),
         log_(log),
         mode_(PARSE_EAGERLY),  // Lazy mode must be set explicitly.
+        parsing_module_(false),
         stack_limit_(stack_limit),
         zone_(zone),
         scanner_(scanner),
@@ -113,13 +193,12 @@
         allow_lazy_(false),
         allow_natives_(false),
         allow_tailcalls_(false),
-        allow_harmony_sloppy_(false),
-        allow_harmony_sloppy_function_(false),
-        allow_harmony_sloppy_let_(false),
         allow_harmony_restrictive_declarations_(false),
         allow_harmony_do_expressions_(false),
+        allow_harmony_for_in_(false),
         allow_harmony_function_name_(false),
-        allow_harmony_function_sent_(false) {}
+        allow_harmony_function_sent_(false),
+        allow_harmony_async_await_(false) {}
 
 #define ALLOW_ACCESSORS(name)                           \
   bool allow_##name() const { return allow_##name##_; } \
@@ -134,13 +213,12 @@
   ALLOW_ACCESSORS(lazy);
   ALLOW_ACCESSORS(natives);
   ALLOW_ACCESSORS(tailcalls);
-  ALLOW_ACCESSORS(harmony_sloppy);
-  ALLOW_ACCESSORS(harmony_sloppy_function);
-  ALLOW_ACCESSORS(harmony_sloppy_let);
   ALLOW_ACCESSORS(harmony_restrictive_declarations);
   ALLOW_ACCESSORS(harmony_do_expressions);
+  ALLOW_ACCESSORS(harmony_for_in);
   ALLOW_ACCESSORS(harmony_function_name);
   ALLOW_ACCESSORS(harmony_function_sent);
+  ALLOW_ACCESSORS(harmony_async_await);
   SCANNER_ACCESSORS(harmony_exponentiation_operator);
 
 #undef SCANNER_ACCESSORS
@@ -195,6 +273,64 @@
     Scope* scope;
   };
 
+  class TailCallExpressionList {
+   public:
+    explicit TailCallExpressionList(Zone* zone)
+        : zone_(zone), expressions_(0, zone), has_explicit_tail_calls_(false) {}
+
+    const ZoneList<ExpressionT>& expressions() const { return expressions_; }
+    const Scanner::Location& location() const { return loc_; }
+
+    bool has_explicit_tail_calls() const { return has_explicit_tail_calls_; }
+
+    void Swap(TailCallExpressionList& other) {
+      expressions_.Swap(&other.expressions_);
+      std::swap(loc_, other.loc_);
+      std::swap(has_explicit_tail_calls_, other.has_explicit_tail_calls_);
+    }
+
+    void AddImplicitTailCall(ExpressionT expr) {
+      expressions_.Add(expr, zone_);
+    }
+
+    void AddExplicitTailCall(ExpressionT expr, const Scanner::Location& loc) {
+      if (!has_explicit_tail_calls()) {
+        loc_ = loc;
+        has_explicit_tail_calls_ = true;
+      }
+      expressions_.Add(expr, zone_);
+    }
+
+    void Append(const TailCallExpressionList& other) {
+      if (!has_explicit_tail_calls()) {
+        loc_ = other.loc_;
+        has_explicit_tail_calls_ = other.has_explicit_tail_calls_;
+      }
+      expressions_.AddAll(other.expressions_, zone_);
+    }
+
+   private:
+    Zone* zone_;
+    ZoneList<ExpressionT> expressions_;
+    Scanner::Location loc_;
+    bool has_explicit_tail_calls_;
+  };
+
+  // Defines whether tail call expressions are allowed or not.
+  enum class ReturnExprContext {
+    // We are inside a return statement that is allowed to contain tail call
+    // expressions, so tail call expressions are allowed.
+    kInsideValidReturnStatement,
+
+    // We are inside a block in which tail call expressions are allowed but
+    // not yet inside a return statement.
+    kInsideValidBlock,
+
+    // Tail call expressions are not allowed in the following blocks.
+    kInsideTryBlock,
+    kInsideForInOfBody,
+  };
+
   class FunctionState BASE_EMBEDDED {
    public:
     FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
@@ -230,6 +366,8 @@
     }
 
     bool is_generator() const { return IsGeneratorFunction(kind_); }
+    bool is_async_function() const { return IsAsyncFunction(kind_); }
+    bool is_resumable() const { return is_generator() || is_async_function(); }
 
     FunctionKind kind() const { return kind_; }
     FunctionState* outer() const { return outer_function_state_; }
@@ -237,7 +375,7 @@
     void set_generator_object_variable(
         typename Traits::Type::GeneratorVariable* variable) {
       DCHECK(variable != NULL);
-      DCHECK(is_generator());
+      DCHECK(is_resumable());
       generator_object_variable_ = variable;
     }
     typename Traits::Type::GeneratorVariable* generator_object_variable()
@@ -252,26 +390,43 @@
       return destructuring_assignments_to_rewrite_;
     }
 
-    List<ExpressionT>& expressions_in_tail_position() {
-      return expressions_in_tail_position_;
+    TailCallExpressionList& tail_call_expressions() {
+      return tail_call_expressions_;
     }
-    void AddExpressionInTailPosition(ExpressionT expression) {
-      if (collect_expressions_in_tail_position_) {
-        expressions_in_tail_position_.Add(expression);
+    void AddImplicitTailCallExpression(ExpressionT expression) {
+      if (return_expr_context() ==
+          ReturnExprContext::kInsideValidReturnStatement) {
+        tail_call_expressions_.AddImplicitTailCall(expression);
+      }
+    }
+    void AddExplicitTailCallExpression(ExpressionT expression,
+                                       const Scanner::Location& loc) {
+      DCHECK(expression->IsCall());
+      if (return_expr_context() ==
+          ReturnExprContext::kInsideValidReturnStatement) {
+        tail_call_expressions_.AddExplicitTailCall(expression, loc);
       }
     }
 
-    bool collect_expressions_in_tail_position() const {
-      return collect_expressions_in_tail_position_;
+    ReturnExprContext return_expr_context() const {
+      return return_expr_context_;
     }
-    void set_collect_expressions_in_tail_position(bool collect) {
-      collect_expressions_in_tail_position_ = collect;
+    void set_return_expr_context(ReturnExprContext context) {
+      return_expr_context_ = context;
     }
 
     ZoneList<ExpressionT>* non_patterns_to_rewrite() {
       return &non_patterns_to_rewrite_;
     }
 
+    void next_function_is_parenthesized(bool parenthesized) {
+      next_function_is_parenthesized_ = parenthesized;
+    }
+
+    bool this_function_is_parenthesized() const {
+      return this_function_is_parenthesized_;
+    }
+
    private:
     void AddDestructuringAssignment(DestructuringAssignment pair) {
       destructuring_assignments_to_rewrite_.Add(pair);
@@ -312,17 +467,67 @@
     Scope* outer_scope_;
 
     List<DestructuringAssignment> destructuring_assignments_to_rewrite_;
-    List<ExpressionT> expressions_in_tail_position_;
-    bool collect_expressions_in_tail_position_;
+    TailCallExpressionList tail_call_expressions_;
+    ReturnExprContext return_expr_context_;
     ZoneList<ExpressionT> non_patterns_to_rewrite_;
 
     typename Traits::Type::Factory* factory_;
 
+    // If true, the next (and immediately following) function literal is
+    // preceded by a parenthesis.
+    bool next_function_is_parenthesized_;
+
+    // The value of the parent's next_function_is_parenthesized_, as it
+    // applies to this function. Filled in by the constructor.
+    bool this_function_is_parenthesized_;
+
     friend class ParserTraits;
     friend class PreParserTraits;
     friend class Checkpoint;
   };
 
+  // This scope sets current ReturnExprContext to given value.
+  class ReturnExprScope {
+   public:
+    explicit ReturnExprScope(FunctionState* function_state,
+                             ReturnExprContext return_expr_context)
+        : function_state_(function_state),
+          sav_return_expr_context_(function_state->return_expr_context()) {
+      // Don't update the context if we are asked to enable tail call
+      // expressions but the current block does not allow them.
+      if (return_expr_context !=
+              ReturnExprContext::kInsideValidReturnStatement ||
+          sav_return_expr_context_ == ReturnExprContext::kInsideValidBlock) {
+        function_state->set_return_expr_context(return_expr_context);
+      }
+    }
+    ~ReturnExprScope() {
+      function_state_->set_return_expr_context(sav_return_expr_context_);
+    }
+
+   private:
+    FunctionState* function_state_;
+    ReturnExprContext sav_return_expr_context_;
+  };
+
+  // Collects all return expressions at tail call position in this scope
+  // into a separate list.
+  class CollectExpressionsInTailPositionToListScope {
+   public:
+    CollectExpressionsInTailPositionToListScope(FunctionState* function_state,
+                                                TailCallExpressionList* list)
+        : function_state_(function_state), list_(list) {
+      function_state->tail_call_expressions().Swap(*list_);
+    }
+    ~CollectExpressionsInTailPositionToListScope() {
+      function_state_->tail_call_expressions().Swap(*list_);
+    }
+
+   private:
+    FunctionState* function_state_;
+    TailCallExpressionList* list_;
+  };
+
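+
A usage sketch of the RAII helpers above, assuming a FunctionState in scope: entering a region that forbids tail calls (e.g. a try block, per kInsideTryBlock) swaps the context in, and the destructor restores the previous value even on early exit.

// Sketch only; the function and its caller are hypothetical.
void ParseRegionSketch(FunctionState* function_state) {
  ReturnExprScope disallow_tail_calls(function_state,
                                      ReturnExprContext::kInsideTryBlock);
  // While this object is live, AddImplicitTailCallExpression() is a no-op
  // because return_expr_context() != kInsideValidReturnStatement.
}  // previous ReturnExprContext restored here
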
   // Annoyingly, arrow functions first parse as comma expressions, then when we
   // see the => we have to go back and reinterpret the arguments as being formal
   // parameters.  To do so we need to reset some of the parser state back to
@@ -455,7 +660,8 @@
 
   bool peek_any_identifier() {
     Token::Value next = peek();
-    return next == Token::IDENTIFIER || next == Token::FUTURE_RESERVED_WORD ||
+    return next == Token::IDENTIFIER || next == Token::ENUM ||
+           next == Token::AWAIT || next == Token::ASYNC ||
            next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
            next == Token::STATIC || next == Token::YIELD;
   }
@@ -512,6 +718,18 @@
       *ok = false;
     }
   }
+  // For now, this check just collects statistics.
+  void CheckDecimalLiteralWithLeadingZero(int* use_counts, int beg_pos,
+                                          int end_pos) {
+    Scanner::Location token_location =
+        scanner()->decimal_with_leading_zero_position();
+    if (token_location.IsValid() && beg_pos <= token_location.beg_pos &&
+        token_location.end_pos <= end_pos) {
+      scanner()->clear_decimal_with_leading_zero_position();
+      if (use_counts != nullptr)
+        ++use_counts[v8::Isolate::kDecimalWithLeadingZeroInStrictMode];
+    }
+  }
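Note: in other words, a strict-mode decimal literal with a leading zero (a legacy form such as 08 or 09, which the spec reserves to sloppy code) is still accepted, but its occurrence is recorded under kDecimalWithLeadingZeroInStrictMode. A sketch; the literals are commented out because the form is nonstandard:

  // var a = 08;     // sloppy mode: value 8, accepted per Annex B behavior
  // (function() {
  //   "use strict";
  //   var b = 08;   // spec: SyntaxError; V8 accepts it for now and only
  //                 // increments the use counter
  // })();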
 
   inline void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
     CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kStrictOctalLiteral,
@@ -563,14 +781,10 @@
 
   LanguageMode language_mode() { return scope_->language_mode(); }
   bool is_generator() const { return function_state_->is_generator(); }
-
-  bool allow_const() {
-    return is_strict(language_mode()) || allow_harmony_sloppy();
+  bool is_async_function() const {
+    return function_state_->is_async_function();
   }
-
-  bool allow_let() {
-    return is_strict(language_mode()) || allow_harmony_sloppy_let();
-  }
+  bool is_resumable() const { return function_state_->is_resumable(); }
 
   // Report syntax errors.
   void ReportMessage(MessageTemplate::Template message, const char* arg = NULL,
@@ -627,8 +841,16 @@
 
   void ValidateBindingPattern(const ExpressionClassifier* classifier,
                               bool* ok) {
-    if (!classifier->is_valid_binding_pattern()) {
-      ReportClassifierError(classifier->binding_pattern_error());
+    if (!classifier->is_valid_binding_pattern() ||
+        !classifier->is_valid_async_binding_pattern()) {
+      const Scanner::Location& a = classifier->binding_pattern_error().location;
+      const Scanner::Location& b =
+          classifier->async_binding_pattern_error().location;
+      if (a.beg_pos < 0 || (b.beg_pos >= 0 && a.beg_pos > b.beg_pos)) {
+        ReportClassifierError(classifier->async_binding_pattern_error());
+      } else {
+        ReportClassifierError(classifier->binding_pattern_error());
+      }
       *ok = false;
     }
   }
@@ -657,7 +879,8 @@
 
   void ValidateArrowFormalParameters(const ExpressionClassifier* classifier,
                                      ExpressionT expr,
-                                     bool parenthesized_formals, bool* ok) {
+                                     bool parenthesized_formals, bool is_async,
+                                     bool* ok) {
     if (classifier->is_valid_binding_pattern()) {
       // A simple arrow formal parameter: IDENTIFIER => BODY.
       if (!this->IsIdentifier(expr)) {
@@ -677,6 +900,12 @@
       ReportClassifierError(error);
       *ok = false;
     }
+    if (is_async && !classifier->is_valid_async_arrow_formal_parameters()) {
+      const typename ExpressionClassifier::Error& error =
+          classifier->async_arrow_formal_parameters_error();
+      ReportClassifierError(error);
+      *ok = false;
+    }
   }
 
   void ValidateLetPattern(const ExpressionClassifier* classifier, bool* ok) {
@@ -686,6 +915,15 @@
     }
   }
 
+  void CheckNoTailCallExpressions(const ExpressionClassifier* classifier,
+                                  bool* ok) {
+    if (FLAG_harmony_explicit_tailcalls &&
+        classifier->has_tail_call_expression()) {
+      ReportClassifierError(classifier->tail_call_expression_error());
+      *ok = false;
+    }
+  }
+
   void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
@@ -741,27 +979,40 @@
                                  ExpressionClassifier* classifier, bool* ok);
 
   ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
-                                     bool* ok);
+                                     bool* is_async, bool* ok);
+  ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
+                                     bool* ok) {
+    bool is_async;
+    return ParsePrimaryExpression(classifier, &is_async, ok);
+  }
   ExpressionT ParseExpression(bool accept_IN, bool* ok);
   ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
                               bool* ok);
   ExpressionT ParseArrayLiteral(ExpressionClassifier* classifier, bool* ok);
   ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
-                                bool* is_computed_name,
+                                bool* is_await, bool* is_computed_name,
                                 ExpressionClassifier* classifier, bool* ok);
   ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
   ObjectLiteralPropertyT ParsePropertyDefinition(
       ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
-      bool is_static, bool* is_computed_name, bool* has_seen_constructor,
+      MethodKind kind, bool* is_computed_name, bool* has_seen_constructor,
       ExpressionClassifier* classifier, IdentifierT* name, bool* ok);
   typename Traits::Type::ExpressionList ParseArguments(
+      Scanner::Location* first_spread_pos, bool maybe_arrow,
+      ExpressionClassifier* classifier, bool* ok);
+  typename Traits::Type::ExpressionList ParseArguments(
       Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
-      bool* ok);
+      bool* ok) {
+    return ParseArguments(first_spread_pos, false, classifier, ok);
+  }
 
   ExpressionT ParseAssignmentExpression(bool accept_IN,
                                         ExpressionClassifier* classifier,
                                         bool* ok);
-  ExpressionT ParseYieldExpression(ExpressionClassifier* classifier, bool* ok);
+  ExpressionT ParseYieldExpression(bool accept_IN,
+                                   ExpressionClassifier* classifier, bool* ok);
+  ExpressionT ParseTailCallExpression(ExpressionClassifier* classifier,
+                                      bool* ok);
   ExpressionT ParseConditionalExpression(bool accept_IN,
                                          ExpressionClassifier* classifier,
                                          bool* ok);
@@ -773,12 +1024,15 @@
   ExpressionT ParseLeftHandSideExpression(ExpressionClassifier* classifier,
                                           bool* ok);
   ExpressionT ParseMemberWithNewPrefixesExpression(
-      ExpressionClassifier* classifier, bool* ok);
-  ExpressionT ParseMemberExpression(ExpressionClassifier* classifier, bool* ok);
+      ExpressionClassifier* classifier, bool* is_async, bool* ok);
+  ExpressionT ParseMemberExpression(ExpressionClassifier* classifier,
+                                    bool* is_async, bool* ok);
   ExpressionT ParseMemberExpressionContinuation(
-      ExpressionT expression, ExpressionClassifier* classifier, bool* ok);
+      ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
+      bool* ok);
   ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
                                         const FormalParametersT& parameters,
+                                        bool is_async,
                                         const ExpressionClassifier& classifier,
                                         bool* ok);
   ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
@@ -850,7 +1104,7 @@
     explicit ObjectLiteralCheckerBase(ParserBase* parser) : parser_(parser) {}
 
     virtual void CheckProperty(Token::Value property, PropertyKind type,
-                               bool is_static, bool is_generator, bool* ok) = 0;
+                               MethodKind method_type, bool* ok) = 0;
 
     virtual ~ObjectLiteralCheckerBase() {}
 
@@ -868,8 +1122,8 @@
     explicit ObjectLiteralChecker(ParserBase* parser)
         : ObjectLiteralCheckerBase(parser), has_seen_proto_(false) {}
 
-    void CheckProperty(Token::Value property, PropertyKind type, bool is_static,
-                       bool is_generator, bool* ok) override;
+    void CheckProperty(Token::Value property, PropertyKind type,
+                       MethodKind method_type, bool* ok) override;
 
    private:
     bool IsProto() { return this->scanner()->LiteralMatches("__proto__", 9); }
@@ -883,8 +1137,8 @@
     explicit ClassLiteralChecker(ParserBase* parser)
         : ObjectLiteralCheckerBase(parser), has_seen_constructor_(false) {}
 
-    void CheckProperty(Token::Value property, PropertyKind type, bool is_static,
-                       bool is_generator, bool* ok) override;
+    void CheckProperty(Token::Value property, PropertyKind type,
+                       MethodKind method_type, bool* ok) override;
 
    private:
     bool IsConstructor() {
@@ -897,12 +1151,6 @@
     bool has_seen_constructor_;
   };
 
-  // If true, the next (and immediately following) function literal is
-  // preceded by a parenthesis.
-  // Heuristically that means that the function will be called immediately,
-  // so never lazily compile it.
-  bool parenthesized_function_;
-
   Scope* scope_;                   // Scope stack.
   FunctionState* function_state_;  // Function state stack.
   v8::Extension* extension_;
@@ -910,6 +1158,7 @@
   AstValueFactory* ast_value_factory_;  // Not owned.
   ParserRecorder* log_;
   Mode mode_;
+  bool parsing_module_;
   uintptr_t stack_limit_;
 
  private:
@@ -921,13 +1170,12 @@
   bool allow_lazy_;
   bool allow_natives_;
   bool allow_tailcalls_;
-  bool allow_harmony_sloppy_;
-  bool allow_harmony_sloppy_function_;
-  bool allow_harmony_sloppy_let_;
   bool allow_harmony_restrictive_declarations_;
   bool allow_harmony_do_expressions_;
+  bool allow_harmony_for_in_;
   bool allow_harmony_function_name_;
   bool allow_harmony_function_sent_;
+  bool allow_harmony_async_await_;
 };
 
 template <class Traits>
@@ -945,11 +1193,19 @@
       outer_function_state_(*function_state_stack),
       scope_stack_(scope_stack),
       outer_scope_(*scope_stack),
-      collect_expressions_in_tail_position_(true),
+      tail_call_expressions_(scope->zone()),
+      return_expr_context_(ReturnExprContext::kInsideValidBlock),
       non_patterns_to_rewrite_(0, scope->zone()),
-      factory_(factory) {
+      factory_(factory),
+      next_function_is_parenthesized_(false),
+      this_function_is_parenthesized_(false) {
   *scope_stack_ = scope;
   *function_state_stack = this;
+  if (outer_function_state_) {
+    this_function_is_parenthesized_ =
+        outer_function_state_->next_function_is_parenthesized_;
+    outer_function_state_->next_function_is_parenthesized_ = false;
+  }
 }
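Note: this replaces the old parser-wide parenthesized_function_ flag; the constructor transfers the parent's one-shot hint to the new FunctionState. The heuristic itself is unchanged, as in this sketch:

  const r = (function () {   // literal follows '(': assumed to be an IIFE,
    return 42;               // so it is compiled eagerly
  })();
  function later() {}        // not parenthesized: eligible for lazy compilation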
 
 
@@ -979,7 +1235,8 @@
     case Token::IDENTIFIER:
       *message = MessageTemplate::kUnexpectedTokenIdentifier;
       break;
-    case Token::FUTURE_RESERVED_WORD:
+    case Token::AWAIT:
+    case Token::ENUM:
       *message = MessageTemplate::kUnexpectedReserved;
       break;
     case Token::LET:
@@ -1054,7 +1311,8 @@
 ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
                                                bool* ok) {
   Token::Value next = Next();
-  if (next == Token::IDENTIFIER) {
+  if (next == Token::IDENTIFIER || next == Token::ASYNC ||
+      (next == Token::AWAIT && !parsing_module_)) {
     IdentifierT name = this->GetSymbol(scanner());
     // When this function is used to read a formal parameter, we don't always
     // know whether the function is going to be strict or sloppy.  Indeed for
@@ -1079,6 +1337,14 @@
             scanner()->location(), MessageTemplate::kStrictEvalArguments);
       }
     }
+    if (this->IsAwait(name)) {
+      if (is_async_function()) {
+        classifier->RecordPatternError(
+            scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
+      }
+      classifier->RecordAsyncArrowFormalParametersError(
+          scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
+    }
 
     if (classifier->duplicate_finder() != nullptr &&
         scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
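Note: the effect is that `await` stays usable as a plain identifier in scripts, while the recorded classifier errors fire only if the enclosing code turns out to be an async function or an async arrow parameter list. A sketch, with the rejected forms commented out:

  var await = 1;              // still a legal name in non-module scripts
  async function f() {
    // var await = 0;         // error: kAwaitBindingIdentifier
    return 0;
  }
  // async (await) => 0;      // rejected via the async-arrow classifier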
@@ -1118,7 +1384,8 @@
 ParserBase<Traits>::ParseIdentifierOrStrictReservedWord(
     bool is_generator, bool* is_strict_reserved, bool* ok) {
   Token::Value next = Next();
-  if (next == Token::IDENTIFIER) {
+  if (next == Token::IDENTIFIER || (next == Token::AWAIT && !parsing_module_) ||
+      next == Token::ASYNC) {
     *is_strict_reserved = false;
   } else if (next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
              next == Token::STATIC || (next == Token::YIELD && !is_generator)) {
@@ -1134,13 +1401,13 @@
   return name;
 }
 
-
 template <class Traits>
 typename ParserBase<Traits>::IdentifierT
 ParserBase<Traits>::ParseIdentifierName(bool* ok) {
   Token::Value next = Next();
-  if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
-      next != Token::LET && next != Token::STATIC && next != Token::YIELD &&
+  if (next != Token::IDENTIFIER && next != Token::ASYNC &&
+      next != Token::ENUM && next != Token::AWAIT && next != Token::LET &&
+      next != Token::STATIC && next != Token::YIELD &&
       next != Token::FUTURE_STRICT_RESERVED_WORD &&
       next != Token::ESCAPED_KEYWORD &&
       next != Token::ESCAPED_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
@@ -1195,11 +1462,10 @@
 #define DUMMY )  // to make indentation work
 #undef DUMMY
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
-                                           bool* ok) {
+                                           bool* is_async, bool* ok) {
   // PrimaryExpression ::
   //   'this'
   //   'null'
@@ -1215,6 +1481,7 @@
   //   '(' Expression ')'
   //   TemplateLiteral
   //   do Block
+  //   AsyncFunctionExpression
 
   int beg_pos = peek_position();
   switch (peek()) {
@@ -1234,10 +1501,21 @@
       BindingPatternUnexpectedToken(classifier);
       return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
 
+    case Token::ASYNC:
+      if (allow_harmony_async_await() &&
+          !scanner()->HasAnyLineTerminatorAfterNext() &&
+          PeekAhead() == Token::FUNCTION) {
+        Consume(Token::ASYNC);
+        return this->ParseAsyncFunctionExpression(CHECK_OK);
+      }
+      // CoverCallExpressionAndAsyncArrowHead
+      *is_async = true;
+    /* falls through */
     case Token::IDENTIFIER:
     case Token::LET:
     case Token::STATIC:
     case Token::YIELD:
+    case Token::AWAIT:
     case Token::ESCAPED_STRICT_RESERVED_WORD:
     case Token::FUTURE_STRICT_RESERVED_WORD: {
       // Using eval or arguments in this context is OK even in strict mode.
@@ -1315,7 +1593,8 @@
       }
       // Heuristically try to detect immediately called functions before
       // seeing the call parentheses.
-      parenthesized_function_ = (peek() == Token::FUNCTION);
+      function_state_->next_function_is_parenthesized(peek() ==
+                                                      Token::FUNCTION);
       ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
       Expect(Token::RPAREN, CHECK_OK);
       return expr;
@@ -1324,11 +1603,6 @@
     case Token::CLASS: {
       BindingPatternUnexpectedToken(classifier);
       Consume(Token::CLASS);
-      if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
-        ReportMessage(MessageTemplate::kSloppyLexical);
-        *ok = false;
-        return this->EmptyExpression();
-      }
       int class_token_position = position();
       IdentifierT name = this->EmptyIdentifier();
       bool is_strict_reserved_name = false;
@@ -1382,7 +1656,6 @@
   return result;
 }
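Note: for the new ASYNC case above, `async` begins an AsyncFunctionExpression only when `function` follows on the same line; otherwise it falls through and is parsed as an ordinary identifier (and flagged as a possible async arrow head). A sketch:

  const f = async function () { return 1; };  // AsyncFunctionExpression
  // With a line terminator after `async`, ASI applies instead:
  //   const g = async
  //   function h() {}     // reads as `const g = async;` plus a declaration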
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
     bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
@@ -1398,6 +1671,7 @@
   bool is_simple_parameter_list = this->IsIdentifier(result);
   bool seen_rest = false;
   while (peek() == Token::COMMA) {
+    CheckNoTailCallExpressions(classifier, CHECK_OK);
     if (seen_rest) {
       // At this point the production can't possibly be valid, but we don't know
       // which error to signal.
@@ -1461,6 +1735,7 @@
       int expr_pos = peek_position();
       ExpressionT argument =
           this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+      CheckNoTailCallExpressions(classifier, CHECK_OK);
       elem = factory()->NewSpread(argument, start_pos, expr_pos);
 
       if (first_spread_index < 0) {
@@ -1484,6 +1759,7 @@
     } else {
       int beg_pos = peek_position();
       elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+      CheckNoTailCallExpressions(classifier, CHECK_OK);
       CheckDestructuringElement(elem, classifier, beg_pos,
                                 scanner()->location().end_pos);
     }
@@ -1506,11 +1782,10 @@
   return result;
 }
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
-    IdentifierT* name, bool* is_get, bool* is_set, bool* is_computed_name,
-    ExpressionClassifier* classifier, bool* ok) {
+    IdentifierT* name, bool* is_get, bool* is_set, bool* is_await,
+    bool* is_computed_name, ExpressionClassifier* classifier, bool* ok) {
   Token::Value token = peek();
   int pos = peek_position();
 
@@ -1555,6 +1830,9 @@
     default:
       *name = ParseIdentifierName(CHECK_OK);
       scanner()->IsGetOrSet(is_get, is_set);
+      if (this->IsAwait(*name)) {
+        *is_await = true;
+      }
       break;
   }
 
@@ -1564,38 +1842,50 @@
              : factory()->NewStringLiteral(*name, pos);
 }
 
-
 template <class Traits>
 typename ParserBase<Traits>::ObjectLiteralPropertyT
 ParserBase<Traits>::ParsePropertyDefinition(
     ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
-    bool is_static, bool* is_computed_name, bool* has_seen_constructor,
+    MethodKind method_kind, bool* is_computed_name, bool* has_seen_constructor,
     ExpressionClassifier* classifier, IdentifierT* name, bool* ok) {
-  DCHECK(!in_class || is_static || has_seen_constructor != nullptr);
+  DCHECK(!in_class || IsStaticMethod(method_kind) ||
+         has_seen_constructor != nullptr);
   ExpressionT value = this->EmptyExpression();
   bool is_get = false;
   bool is_set = false;
+  bool is_await = false;
   bool is_generator = Check(Token::MUL);
+  bool is_async = false;
+  const bool is_static = IsStaticMethod(method_kind);
 
   Token::Value name_token = peek();
+
+  if (is_generator) {
+    method_kind |= MethodKind::Generator;
+  } else if (allow_harmony_async_await() && name_token == Token::ASYNC &&
+             !scanner()->HasAnyLineTerminatorAfterNext() &&
+             PeekAhead() != Token::LPAREN && PeekAhead()) {
+    is_async = true;
+  }
+
   int next_beg_pos = scanner()->peek_location().beg_pos;
   int next_end_pos = scanner()->peek_location().end_pos;
-  ExpressionT name_expression =
-      ParsePropertyName(name, &is_get, &is_set, is_computed_name, classifier,
-                        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+  ExpressionT name_expression = ParsePropertyName(
+      name, &is_get, &is_set, &is_await, is_computed_name, classifier,
+      CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
   if (fni_ != nullptr && !*is_computed_name) {
     this->PushLiteralName(fni_, *name);
   }
 
   if (!in_class && !is_generator) {
-    DCHECK(!is_static);
+    DCHECK(!IsStaticMethod(method_kind));
 
     if (peek() == Token::COLON) {
       // PropertyDefinition
       //    PropertyName ':' AssignmentExpression
       if (!*is_computed_name) {
-        checker->CheckProperty(name_token, kValueProperty, false, false,
+        checker->CheckProperty(name_token, kValueProperty, MethodKind::Normal,
                                CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
       }
       Consume(Token::COLON);
@@ -1605,12 +1895,12 @@
       CheckDestructuringElement(value, classifier, beg_pos,
                                 scanner()->location().end_pos);
 
-      return factory()->NewObjectLiteralProperty(name_expression, value, false,
-                                                 *is_computed_name);
+      return factory()->NewObjectLiteralProperty(name_expression, value,
+                                                 is_static, *is_computed_name);
     }
 
-    if (Token::IsIdentifier(name_token, language_mode(),
-                            this->is_generator()) &&
+    if (Token::IsIdentifier(name_token, language_mode(), this->is_generator(),
+                            parsing_module_) &&
         (peek() == Token::COMMA || peek() == Token::RBRACE ||
          peek() == Token::ASSIGN)) {
       // PropertyDefinition
@@ -1627,7 +1917,11 @@
         classifier->RecordLetPatternError(
             scanner()->location(), MessageTemplate::kLetInLexicalBinding);
       }
-
+      if (is_await && is_async_function()) {
+        classifier->RecordPatternError(
+            Scanner::Location(next_beg_pos, next_end_pos),
+            MessageTemplate::kAwaitBindingIdentifier);
+      }
       ExpressionT lhs = this->ExpressionFromIdentifier(
           *name, next_beg_pos, next_end_pos, scope_, factory());
       CheckDestructuringElement(lhs, classifier, next_beg_pos, next_end_pos);
@@ -1655,7 +1949,7 @@
       }
 
       return factory()->NewObjectLiteralProperty(
-          name_expression, value, ObjectLiteralProperty::COMPUTED, false,
+          name_expression, value, ObjectLiteralProperty::COMPUTED, is_static,
           false);
     }
   }
@@ -1665,20 +1959,32 @@
       Scanner::Location(next_beg_pos, scanner()->location().end_pos),
       MessageTemplate::kInvalidDestructuringTarget);
 
+  if (is_async && !IsSpecialMethod(method_kind)) {
+    DCHECK(!is_get);
+    DCHECK(!is_set);
+    bool dont_care;
+    name_expression = ParsePropertyName(
+        name, &dont_care, &dont_care, &dont_care, is_computed_name, classifier,
+        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+    method_kind |= MethodKind::Async;
+  }
+
   if (is_generator || peek() == Token::LPAREN) {
     // MethodDefinition
     //    PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
     //    '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
     if (!*is_computed_name) {
-      checker->CheckProperty(name_token, kMethodProperty, is_static,
-                             is_generator,
+      checker->CheckProperty(name_token, kMethodProperty, method_kind,
                              CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
     }
 
-    FunctionKind kind = is_generator ? FunctionKind::kConciseGeneratorMethod
-                                     : FunctionKind::kConciseMethod;
+    FunctionKind kind = is_generator
+                            ? FunctionKind::kConciseGeneratorMethod
+                            : is_async ? FunctionKind::kAsyncConciseMethod
+                                       : FunctionKind::kConciseMethod;
 
-    if (in_class && !is_static && this->IsConstructor(*name)) {
+    if (in_class && !IsStaticMethod(method_kind) &&
+        this->IsConstructor(*name)) {
       *has_seen_constructor = true;
       kind = has_extends ? FunctionKind::kSubclassConstructor
                          : FunctionKind::kBaseConstructor;
@@ -1694,13 +2000,13 @@
                                                is_static, *is_computed_name);
   }
 
-  if (in_class && name_token == Token::STATIC && !is_static) {
+  if (in_class && name_token == Token::STATIC && IsNormalMethod(method_kind)) {
     // ClassElement (static)
     //    'static' MethodDefinition
     *name = this->EmptyIdentifier();
     ObjectLiteralPropertyT property = ParsePropertyDefinition(
-        checker, true, has_extends, true, is_computed_name, nullptr, classifier,
-        name, ok);
+        checker, true, has_extends, MethodKind::Static, is_computed_name,
+        nullptr, classifier, name, ok);
     Traits::RewriteNonPattern(classifier, ok);
     return property;
   }
@@ -1714,12 +2020,11 @@
     name_token = peek();
 
     name_expression = ParsePropertyName(
-        name, &dont_care, &dont_care, is_computed_name, classifier,
+        name, &dont_care, &dont_care, &dont_care, is_computed_name, classifier,
         CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
     if (!*is_computed_name) {
-      checker->CheckProperty(name_token, kAccessorProperty, is_static,
-                             is_generator,
+      checker->CheckProperty(name_token, kAccessorProperty, method_kind,
                              CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
     }
 
@@ -1769,13 +2074,12 @@
     FuncNameInferrer::State fni_state(fni_);
 
     const bool in_class = false;
-    const bool is_static = false;
     const bool has_extends = false;
     bool is_computed_name = false;
     IdentifierT name = this->EmptyIdentifier();
     ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
-        &checker, in_class, has_extends, is_static, &is_computed_name, NULL,
-        classifier, &name, CHECK_OK);
+        &checker, in_class, has_extends, MethodKind::Normal, &is_computed_name,
+        NULL, classifier, &name, CHECK_OK);
 
     if (is_computed_name) {
       has_computed_names = true;
@@ -1809,11 +2113,10 @@
                                      pos);
 }
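Note: within object literals, the effect of the MethodKind plumbing above is that `async` acts as a modifier only when it is not followed by a line terminator and the next token is not '('. A sketch (this syntax was flag-gated at the time of this change):

  const o = {
    async f() { return 1; },  // async concise method: kAsyncConciseMethod
    async() { return 2; },    // a plain method *named* `async`
    // async: 3,              // a plain property named `async` also stays legal
  };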
 
-
 template <class Traits>
 typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
-    Scanner::Location* first_spread_arg_loc, ExpressionClassifier* classifier,
-    bool* ok) {
+    Scanner::Location* first_spread_arg_loc, bool maybe_arrow,
+    ExpressionClassifier* classifier, bool* ok) {
   // Arguments ::
   //   '(' (AssignmentExpression)*[','] ')'
 
@@ -1831,6 +2134,7 @@
 
     ExpressionT argument = this->ParseAssignmentExpression(
         true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
+    CheckNoTailCallExpressions(classifier, CHECK_OK_CUSTOM(NullExpressionList));
     Traits::RewriteNonPattern(classifier, CHECK_OK_CUSTOM(NullExpressionList));
     if (is_spread) {
       if (!spread_arg.IsValid()) {
@@ -1868,7 +2172,7 @@
   }
   *first_spread_arg_loc = spread_arg;
 
-  if (spread_arg.IsValid()) {
+  if ((!maybe_arrow || peek() != Token::ARROW) && spread_arg.IsValid()) {
     // Unspread parameter sequences are translated into array literals in the
     // parser. Ensure that the number of materialized literals matches between
     // the parser and preparser
@@ -1893,25 +2197,38 @@
   int lhs_beg_pos = peek_position();
 
   if (peek() == Token::YIELD && is_generator()) {
-    return this->ParseYieldExpression(classifier, ok);
+    return this->ParseYieldExpression(accept_IN, classifier, ok);
   }
 
   FuncNameInferrer::State fni_state(fni_);
   ParserBase<Traits>::Checkpoint checkpoint(this);
   ExpressionClassifier arrow_formals_classifier(this,
                                                 classifier->duplicate_finder());
+
+  bool is_async = allow_harmony_async_await() && peek() == Token::ASYNC &&
+                  !scanner()->HasAnyLineTerminatorAfterNext();
+
   bool parenthesized_formals = peek() == Token::LPAREN;
-  if (!parenthesized_formals) {
+  if (!is_async && !parenthesized_formals) {
     ArrowFormalParametersUnexpectedToken(&arrow_formals_classifier);
   }
   ExpressionT expression = this->ParseConditionalExpression(
       accept_IN, &arrow_formals_classifier, CHECK_OK);
+
+  if (is_async && peek_any_identifier() && PeekAhead() == Token::ARROW) {
+    // async Identifier => AsyncConciseBody
+    IdentifierT name =
+        ParseAndClassifyIdentifier(&arrow_formals_classifier, CHECK_OK);
+    expression = this->ExpressionFromIdentifier(
+        name, position(), scanner()->location().end_pos, scope_, factory());
+  }
+
   if (peek() == Token::ARROW) {
     classifier->RecordPatternError(scanner()->peek_location(),
                                    MessageTemplate::kUnexpectedToken,
                                    Token::String(Token::ARROW));
     ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
-                                  parenthesized_formals, CHECK_OK);
+                                  parenthesized_formals, is_async, CHECK_OK);
     // This reads strangely, but is correct: it checks whether any
     // sub-expression of the parameter list failed to be a valid formal
     // parameter initializer. Since YieldExpressions are banned anywhere
@@ -1919,9 +2236,11 @@
     // TODO(adamk): Rename "FormalParameterInitializerError" to refer to
     // "YieldExpression", which is its only use.
     ValidateFormalParameterInitializer(&arrow_formals_classifier, ok);
+
     Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
-    Scope* scope =
-        this->NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+    Scope* scope = this->NewScope(scope_, FUNCTION_SCOPE,
+                                  is_async ? FunctionKind::kAsyncArrowFunction
+                                           : FunctionKind::kArrowFunction);
     // Because the arrow's parameters were parsed in the outer scope, any
     // usage flags that might have been triggered there need to be copied
     // to the arrow scope.
@@ -1943,7 +2262,7 @@
           duplicate_loc);
     }
     expression = this->ParseArrowFunctionLiteral(
-        accept_IN, parameters, arrow_formals_classifier, CHECK_OK);
+        accept_IN, parameters, is_async, arrow_formals_classifier, CHECK_OK);
 
     if (fni_ != nullptr) fni_->Infer();
 
@@ -1960,8 +2279,10 @@
   classifier->Accumulate(
       &arrow_formals_classifier,
       ExpressionClassifier::StandardProductions |
-      ExpressionClassifier::FormalParametersProductions |
-      ExpressionClassifier::CoverInitializedNameProduction,
+          ExpressionClassifier::FormalParametersProductions |
+          ExpressionClassifier::CoverInitializedNameProduction |
+          ExpressionClassifier::AsyncArrowFormalParametersProduction |
+          ExpressionClassifier::AsyncBindingPatternProduction,
       false);
 
   if (!Token::IsAssignmentOp(peek())) {
@@ -1974,6 +2295,8 @@
   // Now pending non-pattern expressions must be discarded.
   arrow_formals_classifier.Discard();
 
+  CheckNoTailCallExpressions(classifier, CHECK_OK);
+
   if (IsValidPattern(expression) && peek() == Token::ASSIGN) {
     classifier->ForgiveCoverInitializedNameError();
     ValidateAssignmentPattern(classifier, CHECK_OK);
@@ -1998,10 +2321,13 @@
 
   ExpressionT right =
       this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
+  CheckNoTailCallExpressions(&rhs_classifier, CHECK_OK);
   Traits::RewriteNonPattern(&rhs_classifier, CHECK_OK);
   classifier->Accumulate(
-      &rhs_classifier, ExpressionClassifier::ExpressionProductions |
-                       ExpressionClassifier::CoverInitializedNameProduction);
+      &rhs_classifier,
+      ExpressionClassifier::ExpressionProductions |
+          ExpressionClassifier::CoverInitializedNameProduction |
+          ExpressionClassifier::AsyncArrowFormalParametersProduction);
 
   // TODO(1231235): We try to estimate the set of properties set by
   // constructors. We define a new property whenever there is an
@@ -2047,7 +2373,8 @@
 
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
+ParserBase<Traits>::ParseYieldExpression(bool accept_IN,
+                                         ExpressionClassifier* classifier,
                                          bool* ok) {
   // YieldExpression ::
   //   'yield' ([no line terminator] '*'? AssignmentExpression)?
@@ -2078,7 +2405,7 @@
         if (!delegating) break;
         // Delegating yields require an RHS; fall through.
       default:
-        expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
+        expression = ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         break;
     }
@@ -2096,6 +2423,67 @@
   return yield;
 }
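Note: threading accept_IN through means the yield operand now honors the [In] restriction of the surrounding context, matching the `yield AssignmentExpression[?In, ?Yield]` production. A sketch:

  function* g(o: object) {
    const x = yield "k" in o;  // operand is ("k" in o), not (yield "k") in o
  }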
 
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseTailCallExpression(ExpressionClassifier* classifier,
+                                            bool* ok) {
+  // TailCallExpression::
+  //   'continue' MemberExpression  Arguments
+  //   'continue' CallExpression  Arguments
+  //   'continue' MemberExpression  TemplateLiteral
+  //   'continue' CallExpression  TemplateLiteral
+  Expect(Token::CONTINUE, CHECK_OK);
+  int pos = position();
+  int sub_expression_pos = peek_position();
+  ExpressionT expression =
+      this->ParseLeftHandSideExpression(classifier, CHECK_OK);
+  CheckNoTailCallExpressions(classifier, CHECK_OK);
+
+  Scanner::Location loc(pos, scanner()->location().end_pos);
+  if (!expression->IsCall()) {
+    Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
+    ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedInsideTailCall);
+    *ok = false;
+    return Traits::EmptyExpression();
+  }
+  if (Traits::IsDirectEvalCall(expression)) {
+    Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
+    ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCallOfEval);
+    *ok = false;
+    return Traits::EmptyExpression();
+  }
+  if (!is_strict(language_mode())) {
+    ReportMessageAt(loc, MessageTemplate::kUnexpectedSloppyTailCall);
+    *ok = false;
+    return Traits::EmptyExpression();
+  }
+  ReturnExprContext return_expr_context =
+      function_state_->return_expr_context();
+  if (return_expr_context != ReturnExprContext::kInsideValidReturnStatement) {
+    MessageTemplate::Template msg = MessageTemplate::kNone;
+    switch (return_expr_context) {
+      case ReturnExprContext::kInsideValidReturnStatement:
+        UNREACHABLE();
+        return Traits::EmptyExpression();
+      case ReturnExprContext::kInsideValidBlock:
+        msg = MessageTemplate::kUnexpectedTailCall;
+        break;
+      case ReturnExprContext::kInsideTryBlock:
+        msg = MessageTemplate::kUnexpectedTailCallInTryBlock;
+        break;
+      case ReturnExprContext::kInsideForInOfBody:
+        msg = MessageTemplate::kUnexpectedTailCallInForInOf;
+        break;
+    }
+    ReportMessageAt(loc, msg);
+    *ok = false;
+    return Traits::EmptyExpression();
+  }
+  classifier->RecordTailCallExpressionError(
+      loc, MessageTemplate::kUnexpectedTailCall);
+  function_state_->AddExplicitTailCallExpression(expression, loc);
+  return expression;
+}
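Note: reading the checks in order, the operand of `continue` must parse as a call, must not be a direct eval call, must appear in strict code, and must sit directly inside a valid return statement. A sketch in proposal syntax (commented out, since --harmony-explicit-tailcalls never shipped unflagged; g is a hypothetical name):

  // "use strict";
  // function ok(n) { return continue g(n); }      // accepted and collected
  // function e1() { return continue 42; }         // kUnexpectedInsideTailCall
  // function e2() { return continue eval("0"); }  // kUnexpectedTailCallOfEval
  // function e3() { const x = continue g(); }     // kUnexpectedTailCall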
 
 // Precedence = 3
 template <class Traits>
@@ -2112,6 +2500,7 @@
   ExpressionT expression =
       this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
   if (peek() != Token::CONDITIONAL) return expression;
+  CheckNoTailCallExpressions(classifier, CHECK_OK);
   Traits::RewriteNonPattern(classifier, CHECK_OK);
   ArrowFormalParametersUnexpectedToken(classifier);
   BindingPatternUnexpectedToken(classifier);
@@ -2140,6 +2529,7 @@
   for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
     // prec1 >= 4
     while (Precedence(peek(), accept_IN) == prec1) {
+      CheckNoTailCallExpressions(classifier, CHECK_OK);
       Traits::RewriteNonPattern(classifier, CHECK_OK);
       BindingPatternUnexpectedToken(classifier);
       ArrowFormalParametersUnexpectedToken(classifier);
@@ -2150,6 +2540,9 @@
       const int next_prec = is_right_associative ? prec1 : prec1 + 1;
       ExpressionT y =
           ParseBinaryExpression(next_prec, accept_IN, classifier, CHECK_OK);
+      if (op != Token::OR && op != Token::AND) {
+        CheckNoTailCallExpressions(classifier, CHECK_OK);
+      }
       Traits::RewriteNonPattern(classifier, CHECK_OK);
 
       if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
@@ -2168,16 +2561,11 @@
           case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
           default: break;
         }
-        if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
-          x = Traits::RewriteInstanceof(x, y, pos);
-        } else {
-          x = factory()->NewCompareOperation(cmp, x, y, pos);
-          if (cmp != op) {
-            // The comparison was negated - add a NOT.
-            x = factory()->NewUnaryOperation(Token::NOT, x, pos);
-          }
+        x = factory()->NewCompareOperation(cmp, x, y, pos);
+        if (cmp != op) {
+          // The comparison was negated - add a NOT.
+          x = factory()->NewUnaryOperation(Token::NOT, x, pos);
         }
-
       } else if (op == Token::EXP) {
         x = Traits::RewriteExponentiation(x, y, pos);
       } else {
@@ -2205,6 +2593,7 @@
   //   '-' UnaryExpression
   //   '~' UnaryExpression
   //   '!' UnaryExpression
+  //   [+Await] AwaitExpression[?Yield]
 
   Token::Value op = peek();
   if (Token::IsUnaryOp(op)) {
@@ -2214,6 +2603,7 @@
     op = Next();
     int pos = position();
     ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
+    CheckNoTailCallExpressions(classifier, CHECK_OK);
     Traits::RewriteNonPattern(classifier, CHECK_OK);
 
     if (op == Token::DELETE && is_strict(language_mode())) {
@@ -2239,6 +2629,7 @@
     op = Next();
     int beg_pos = peek_position();
     ExpressionT expression = this->ParseUnaryExpression(classifier, CHECK_OK);
+    CheckNoTailCallExpressions(classifier, CHECK_OK);
     expression = this->CheckAndRewriteReferenceExpression(
         expression, beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
@@ -2250,6 +2641,40 @@
                                         expression,
                                         position());
 
+  } else if (is_async_function() && peek() == Token::AWAIT) {
+    int beg_pos = peek_position();
+    switch (PeekAhead()) {
+      case Token::RPAREN:
+      case Token::RBRACK:
+      case Token::RBRACE:
+      case Token::ASSIGN:
+      case Token::COMMA: {
+        Next();
+        IdentifierT name = this->GetSymbol(scanner());
+
+        // Possibly async arrow formals -- record ExpressionError just in case.
+        ExpressionUnexpectedToken(classifier);
+        classifier->RecordAsyncBindingPatternError(
+            Scanner::Location(beg_pos, scanner()->location().end_pos),
+            MessageTemplate::kAwaitBindingIdentifier);
+        classifier->RecordAsyncArrowFormalParametersError(
+            Scanner::Location(beg_pos, scanner()->location().end_pos),
+            MessageTemplate::kAwaitBindingIdentifier);
+
+        return this->ExpressionFromIdentifier(
+            name, beg_pos, scanner()->location().end_pos, scope_, factory());
+      }
+      default:
+        break;
+    }
+    Consume(Token::AWAIT);
+
+    ExpressionT value = ParseUnaryExpression(classifier, CHECK_OK);
+
+    classifier->RecordFormalParameterInitializerError(
+        Scanner::Location(beg_pos, scanner()->location().end_pos),
+        MessageTemplate::kAwaitExpressionFormalParameter);
+    return Traits::RewriteAwaitExpression(value, beg_pos);
   } else {
     return this->ParsePostfixExpression(classifier, ok);
   }
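Note: so inside an async function, `await` followed by a closing token, comma, or `=` is treated as an identifier reference (with errors recorded in case this is really an arrow parameter list); otherwise it consumes a unary operand. The recorded formal-parameter-initializer error bans `await` in parameter defaults, as in this sketch:

  async function f(p: Promise<number>) {
    const v = await p;                 // AwaitExpression over a unary operand
    return v;
  }
  // async function g(x = await 1) {}  // error: await inside a default value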
@@ -2268,6 +2693,7 @@
       this->ParseLeftHandSideExpression(classifier, CHECK_OK);
   if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
       Token::IsCountOp(peek())) {
+    CheckNoTailCallExpressions(classifier, CHECK_OK);
     BindingPatternUnexpectedToken(classifier);
     ArrowFormalParametersUnexpectedToken(classifier);
 
@@ -2287,7 +2713,6 @@
   return expression;
 }
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseLeftHandSideExpression(
@@ -2295,12 +2720,18 @@
   // LeftHandSideExpression ::
   //   (NewExpression | MemberExpression) ...
 
-  ExpressionT result =
-      this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
+  if (FLAG_harmony_explicit_tailcalls && peek() == Token::CONTINUE) {
+    return this->ParseTailCallExpression(classifier, ok);
+  }
+
+  bool is_async = false;
+  ExpressionT result = this->ParseMemberWithNewPrefixesExpression(
+      classifier, &is_async, CHECK_OK);
 
   while (true) {
     switch (peek()) {
       case Token::LBRACK: {
+        CheckNoTailCallExpressions(classifier, CHECK_OK);
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
@@ -2314,13 +2745,13 @@
       }
 
       case Token::LPAREN: {
+        CheckNoTailCallExpressions(classifier, CHECK_OK);
+        int pos;
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
-        ArrowFormalParametersUnexpectedToken(classifier);
-
-        int pos;
         if (scanner()->current_token() == Token::IDENTIFIER ||
-            scanner()->current_token() == Token::SUPER) {
+            scanner()->current_token() == Token::SUPER ||
+            scanner()->current_token() == Token::ASYNC) {
           // For call of an identifier we want to report position of
           // the identifier as position of the call in the stack trace.
           pos = position();
@@ -2340,7 +2771,18 @@
         }
         Scanner::Location spread_pos;
         typename Traits::Type::ExpressionList args =
-            ParseArguments(&spread_pos, classifier, CHECK_OK);
+            ParseArguments(&spread_pos, is_async, classifier, CHECK_OK);
+
+        if (V8_UNLIKELY(is_async && peek() == Token::ARROW)) {
+          if (args->length()) {
+            // async ( Arguments ) => ...
+            return Traits::ExpressionListToExpression(args);
+          }
+          // async () => ...
+          return factory()->NewEmptyParentheses(pos);
+        }
+
+        ArrowFormalParametersUnexpectedToken(classifier);
 
         // Keep track of eval() calls since they disable all local variable
         // optimizations.
@@ -2372,6 +2814,7 @@
       }
 
       case Token::PERIOD: {
+        CheckNoTailCallExpressions(classifier, CHECK_OK);
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
@@ -2386,6 +2829,7 @@
 
       case Token::TEMPLATE_SPAN:
       case Token::TEMPLATE_TAIL: {
+        CheckNoTailCallExpressions(classifier, CHECK_OK);
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
@@ -2399,11 +2843,10 @@
   }
 }
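Note: the LPAREN case above implements CoverCallExpressionAndAsyncArrowHead: the arguments are parsed as a call first and reinterpreted as arrow formals only if `=>` follows. A sketch (the binding named `async` is hypothetical):

  const async = (...xs: unknown[]) => {};              // an ordinary binding
  async(1, 2);                                         // no `=>` follows: plain call
  const add = async (a: number, b: number) => a + b;   // `=>` follows: the parsed
                                                       // arguments become formals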
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
-    ExpressionClassifier* classifier, bool* ok) {
+    ExpressionClassifier* classifier, bool* is_async, bool* ok) {
   // NewExpression ::
   //   ('new')+ MemberExpression
   //
@@ -2436,7 +2879,8 @@
     } else if (peek() == Token::PERIOD) {
       return ParseNewTargetExpression(CHECK_OK);
     } else {
-      result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
+      result = this->ParseMemberWithNewPrefixesExpression(classifier, is_async,
+                                                          CHECK_OK);
     }
     Traits::RewriteNonPattern(classifier, CHECK_OK);
     if (peek() == Token::LPAREN) {
@@ -2452,8 +2896,8 @@
         result = factory()->NewCallNew(result, args, new_pos);
       }
       // The expression can still continue with . or [ after the arguments.
-      result =
-          this->ParseMemberExpressionContinuation(result, classifier, CHECK_OK);
+      result = this->ParseMemberExpressionContinuation(result, is_async,
+                                                       classifier, CHECK_OK);
       return result;
     }
     // NewExpression without arguments.
@@ -2461,14 +2905,13 @@
                                  new_pos);
   }
   // No 'new' or 'super' keyword.
-  return this->ParseMemberExpression(classifier, ok);
+  return this->ParseMemberExpression(classifier, is_async, ok);
 }
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
-                                          bool* ok) {
+                                          bool* is_async, bool* ok) {
   // MemberExpression ::
   //   (PrimaryExpression | FunctionLiteral | ClassLiteral)
   //     ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
@@ -2525,10 +2968,11 @@
     const bool is_new = false;
     result = ParseSuperExpression(is_new, classifier, CHECK_OK);
   } else {
-    result = ParsePrimaryExpression(classifier, CHECK_OK);
+    result = ParsePrimaryExpression(classifier, is_async, CHECK_OK);
   }
 
-  result = ParseMemberExpressionContinuation(result, classifier, CHECK_OK);
+  result =
+      ParseMemberExpressionContinuation(result, is_async, classifier, CHECK_OK);
   return result;
 }
 
@@ -2595,16 +3039,17 @@
   return this->NewTargetExpression(scope_, factory(), pos);
 }
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseMemberExpressionContinuation(
-    ExpressionT expression, ExpressionClassifier* classifier, bool* ok) {
+    ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
+    bool* ok) {
   // Parses this part of MemberExpression:
   // ('[' Expression ']' | '.' Identifier | TemplateLiteral)*
   while (true) {
     switch (peek()) {
       case Token::LBRACK: {
+        *is_async = false;
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
@@ -2621,6 +3066,7 @@
         break;
       }
       case Token::PERIOD: {
+        *is_async = false;
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
@@ -2637,6 +3083,7 @@
       }
       case Token::TEMPLATE_SPAN:
       case Token::TEMPLATE_TAIL: {
+        *is_async = false;
         Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
@@ -2790,28 +3237,32 @@
 template <class Traits>
 bool ParserBase<Traits>::IsNextLetKeyword() {
   DCHECK(peek() == Token::LET);
-  if (!allow_let()) {
-    return false;
-  }
   Token::Value next_next = PeekAhead();
   switch (next_next) {
     case Token::LBRACE:
     case Token::LBRACK:
     case Token::IDENTIFIER:
     case Token::STATIC:
-    case Token::LET:  // Yes, you can do let let = ... in sloppy mode
+    case Token::LET:  // `let let;` is disallowed by static semantics, but the
+                      // token must be first interpreted as a keyword in order
+                      // for those semantics to apply. This ensures that ASI is
+                      // not honored when a LineTerminator separates the
+                      // tokens.
     case Token::YIELD:
+    case Token::AWAIT:
+    case Token::ASYNC:
       return true;
+    case Token::FUTURE_STRICT_RESERVED_WORD:
+      return is_sloppy(language_mode());
     default:
       return false;
   }
 }
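Note: with the sloppy-mode flags gone, `let` is committed as a keyword purely from the token that follows it. The nonobvious consequences, sketched below with the relevant forms as comments:

  let x = 1;            // LET + IDENTIFIER: a lexical declaration
  // let let = 2;       // still parsed as a declaration, then rejected by the
  //                    // static semantics, even across a line break (no ASI)
  // let interface = 3; // FUTURE_STRICT_RESERVED_WORD: treated as a
  //                    // declaration only in sloppy code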
 
-
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseArrowFunctionLiteral(
-    bool accept_IN, const FormalParametersT& formal_parameters,
+    bool accept_IN, const FormalParametersT& formal_parameters, bool is_async,
     const ExpressionClassifier& formals_classifier, bool* ok) {
   if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
     // ASI inserts `;` after arrow parameters if a line terminator is found.
@@ -2828,10 +3279,11 @@
   int expected_property_count = -1;
   Scanner::Location super_loc;
 
+  FunctionKind arrow_kind = is_async ? kAsyncArrowFunction : kArrowFunction;
   {
     typename Traits::Type::Factory function_factory(ast_value_factory());
     FunctionState function_state(&function_state_, &scope_,
-                                 formal_parameters.scope, kArrowFunction,
+                                 formal_parameters.scope, arrow_kind,
                                  &function_factory);
 
     function_state.SkipMaterializedLiterals(
@@ -2857,7 +3309,7 @@
       } else {
         body = this->ParseEagerFunctionBody(
             this->EmptyIdentifier(), RelocInfo::kNoPosition, formal_parameters,
-            kArrowFunction, FunctionLiteral::kAnonymousExpression, CHECK_OK);
+            arrow_kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
         materialized_literal_count =
             function_state.materialized_literal_count();
         expected_property_count = function_state.expected_property_count();
@@ -2865,20 +3317,31 @@
     } else {
       // Single-expression body
       int pos = position();
-      parenthesized_function_ = false;
       ExpressionClassifier classifier(this);
-      ExpressionT expression =
-          ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
-      Traits::RewriteNonPattern(&classifier, CHECK_OK);
+      DCHECK(ReturnExprContext::kInsideValidBlock ==
+             function_state_->return_expr_context());
+      ReturnExprScope allow_tail_calls(
+          function_state_, ReturnExprContext::kInsideValidReturnStatement);
       body = this->NewStatementList(1, zone());
-      this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
-      body->Add(factory()->NewReturnStatement(expression, pos), zone());
+      this->AddParameterInitializationBlock(formal_parameters, body, is_async,
+                                            CHECK_OK);
+      if (is_async) {
+        this->ParseAsyncArrowSingleExpressionBody(body, accept_IN, &classifier,
+                                                  pos, CHECK_OK);
+        Traits::RewriteNonPattern(&classifier, CHECK_OK);
+      } else {
+        ExpressionT expression =
+            ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
+        Traits::RewriteNonPattern(&classifier, CHECK_OK);
+        body->Add(factory()->NewReturnStatement(expression, pos), zone());
+        if (allow_tailcalls() && !is_sloppy(language_mode())) {
+          // ES6 14.6.1 Static Semantics: IsInTailPosition
+          this->MarkTailPosition(expression);
+        }
+      }
       materialized_literal_count = function_state.materialized_literal_count();
       expected_property_count = function_state.expected_property_count();
-      // ES6 14.6.1 Static Semantics: IsInTailPosition
-      if (allow_tailcalls() && !is_sloppy(language_mode())) {
-        this->MarkTailPosition(expression);
-      }
+      this->MarkCollectedTailCallExpressions();
     }
     super_loc = function_state.super_location();
 
@@ -2897,9 +3360,7 @@
       CheckStrictOctalLiteral(formal_parameters.scope->start_position(),
                               scanner()->location().end_pos, CHECK_OK);
     }
-    if (is_strict(language_mode()) || allow_harmony_sloppy()) {
-      this->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
-    }
+    this->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
 
     Traits::RewriteDestructuringAssignments();
   }
@@ -2909,7 +3370,7 @@
       materialized_literal_count, expected_property_count, num_parameters,
       FunctionLiteral::kNoDuplicateParameters,
       FunctionLiteral::kAnonymousExpression,
-      FunctionLiteral::kShouldLazyCompile, FunctionKind::kArrowFunction,
+      FunctionLiteral::kShouldLazyCompile, arrow_kind,
       formal_parameters.scope->start_position());
 
   function_literal->set_function_token_position(
@@ -2979,6 +3440,7 @@
 
     int expr_pos = peek_position();
     ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
+    CheckNoTailCallExpressions(classifier, CHECK_OK);
     Traits::RewriteNonPattern(classifier, CHECK_OK);
     Traits::AddTemplateExpression(&ts, expression);
 
@@ -3077,13 +3539,12 @@
 #undef CHECK_OK
 #undef CHECK_OK_CUSTOM
 
-
 template <typename Traits>
 void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
-    Token::Value property, PropertyKind type, bool is_static, bool is_generator,
+    Token::Value property, PropertyKind type, MethodKind method_type,
     bool* ok) {
-  DCHECK(!is_static);
-  DCHECK(!is_generator || type == kMethodProperty);
+  DCHECK(!IsStaticMethod(method_type));
+  DCHECK(!IsSpecialMethod(method_type) || type == kMethodProperty);
 
   if (property == Token::SMI || property == Token::NUMBER) return;
 
@@ -3098,26 +3559,28 @@
   }
 }
 
-
 template <typename Traits>
 void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
-    Token::Value property, PropertyKind type, bool is_static, bool is_generator,
+    Token::Value property, PropertyKind type, MethodKind method_type,
     bool* ok) {
   DCHECK(type == kMethodProperty || type == kAccessorProperty);
 
   if (property == Token::SMI || property == Token::NUMBER) return;
 
-  if (is_static) {
+  if (IsStaticMethod(method_type)) {
     if (IsPrototype()) {
       this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
       *ok = false;
       return;
     }
   } else if (IsConstructor()) {
-    if (is_generator || type == kAccessorProperty) {
+    const bool is_generator = IsGeneratorMethod(method_type);
+    const bool is_async = IsAsyncMethod(method_type);
+    if (is_generator || is_async || type == kAccessorProperty) {
       MessageTemplate::Template msg =
           is_generator ? MessageTemplate::kConstructorIsGenerator
-                       : MessageTemplate::kConstructorIsAccessor;
+                       : is_async ? MessageTemplate::kConstructorIsAsync
+                                  : MessageTemplate::kConstructorIsAccessor;
       this->parser()->ReportMessage(msg);
       *ok = false;
       return;
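Note: for class literals the checker now distinguishes all three illegal constructor shapes, alongside the existing static-prototype restriction. A sketch, with the rejected forms commented out:

  class A { constructor() {} }               // allowed
  // class B { *constructor() {} }           // kConstructorIsGenerator
  // class C { async constructor() {} }      // kConstructorIsAsync
  // class D { get constructor() {} }        // kConstructorIsAccessor
  // class E { static get prototype() {} }   // kStaticPrototype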
diff --git a/src/parsing/parser.cc b/src/parsing/parser.cc
index fa2893b..822c49e 100644
--- a/src/parsing/parser.cc
+++ b/src/parsing/parser.cc
@@ -39,7 +39,6 @@
   }
 }
 
-
 ParseInfo::ParseInfo(Zone* zone)
     : zone_(zone),
       flags_(0),
@@ -51,15 +50,14 @@
       unicode_cache_(nullptr),
       stack_limit_(0),
       hash_seed_(0),
+      isolate_(nullptr),
       cached_data_(nullptr),
       ast_value_factory_(nullptr),
       literal_(nullptr),
       scope_(nullptr) {}
 
-
 ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
     : ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
-  set_closure(function);
   set_context(Handle<Context>(function->context()));
 }
 
@@ -332,6 +330,10 @@
   return identifier == parser_->ast_value_factory()->undefined_string();
 }
 
+bool ParserTraits::IsAwait(const AstRawString* identifier) const {
+  return identifier == parser_->ast_value_factory()->await_string();
+}
+
 bool ParserTraits::IsPrototype(const AstRawString* identifier) const {
   return identifier == parser_->ast_value_factory()->prototype_string();
 }
@@ -604,8 +606,7 @@
 const AstRawString* ParserTraits::GetNumberAsSymbol(Scanner* scanner) {
   double double_value = parser_->scanner()->DoubleValue();
   char array[100];
-  const char* string =
-      DoubleToCString(double_value, Vector<char>(array, arraysize(array)));
+  const char* string = DoubleToCString(double_value, ArrayVector(array));
   return parser_->ast_value_factory()->GetOneByteString(string);
 }
 
@@ -768,6 +769,10 @@
   expression->MarkTail();
 }
 
+void ParserTraits::MarkCollectedTailCallExpressions() {
+  parser_->MarkCollectedTailCallExpressions();
+}
+
 Parser::Parser(ParseInfo* info)
     : ParserBase<ParserTraits>(info->zone(), &scanner_, info->stack_limit(),
                                info->extension(), info->ast_value_factory(),
@@ -789,16 +794,15 @@
   set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
   set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
                       info->isolate()->is_tail_call_elimination_enabled());
-  set_allow_harmony_sloppy(FLAG_harmony_sloppy);
-  set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
-  set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
   set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
+  set_allow_harmony_for_in(FLAG_harmony_for_in);
   set_allow_harmony_function_name(FLAG_harmony_function_name);
   set_allow_harmony_function_sent(FLAG_harmony_function_sent);
   set_allow_harmony_restrictive_declarations(
       FLAG_harmony_restrictive_declarations);
   set_allow_harmony_exponentiation_operator(
       FLAG_harmony_exponentiation_operator);
+  set_allow_harmony_async_await(FLAG_harmony_async_await);
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
     use_counts_[feature] = 0;
@@ -821,6 +825,7 @@
   DCHECK(parsing_on_main_thread_);
 
   HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
+  RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::Parse);
   TRACE_EVENT0("v8", "V8.Parse");
   Handle<String> source(String::cast(info->script()->source()));
   isolate->counters()->total_parse_size()->Increment(source->length());
@@ -925,15 +930,16 @@
     FunctionState function_state(&function_state_, &scope_, scope,
                                  kNormalFunction, &function_factory);
 
-    // Don't count the mode in the use counters--give the program a chance
-    // to enable script/module-wide strict mode below.
-    scope_->SetLanguageMode(info->language_mode());
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
     bool ok = true;
     int beg_pos = scanner()->location().beg_pos;
-    if (info->is_module()) {
+    parsing_module_ = info->is_module();
+    if (parsing_module_) {
       ParseModuleItemList(body, &ok);
     } else {
+      // Don't count the mode in the use counters--give the program a chance
+      // to enable script-wide strict mode below.
+      scope_->SetLanguageMode(info->language_mode());
       ParseStatementList(body, Token::EOS, &ok);
     }
 
@@ -943,8 +949,10 @@
 
     if (ok && is_strict(language_mode())) {
       CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
+      CheckDecimalLiteralWithLeadingZero(use_counts_, beg_pos,
+                                         scanner()->location().end_pos);
     }
-    if (ok && is_sloppy(language_mode()) && allow_harmony_sloppy_function()) {
+    if (ok && is_sloppy(language_mode())) {
       // TODO(littledan): Function bindings on the global object that modify
       // pre-existing bindings should be made writable, enumerable and
       // nonconfigurable if possible, whereas this code will leave attributes
@@ -984,6 +992,7 @@
   // It's OK to use the Isolate & counters here, since this function is only
   // called in the main thread.
   DCHECK(parsing_on_main_thread_);
+  RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::ParseLazy);
   HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
   TRACE_EVENT0("v8", "V8.ParseLazy");
   Handle<String> source(String::cast(info->script()->source()));
@@ -1054,12 +1063,12 @@
     // Parse the function literal.
     Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
     info->set_script_scope(scope);
-    if (!info->closure().is_null()) {
+    if (!info->context().is_null()) {
       // Ok to use Isolate here, since lazy function parsing is only done in the
       // main thread.
       DCHECK(parsing_on_main_thread_);
-      scope = Scope::DeserializeScopeChain(isolate, zone(),
-                                           info->closure()->context(), scope);
+      scope = Scope::DeserializeScopeChain(isolate, zone(), *info->context(),
+                                           scope);
     }
     original_scope_ = scope;
     AstNodeFactory function_factory(ast_value_factory());
@@ -1073,6 +1082,13 @@
     bool ok = true;
 
     if (shared_info->is_arrow()) {
+      bool is_async = allow_harmony_async_await() && shared_info->is_async();
+      if (is_async) {
+        DCHECK(!scanner()->HasAnyLineTerminatorAfterNext());
+        Consume(Token::ASYNC);
+        DCHECK(peek_any_identifier() || peek() == Token::LPAREN);
+      }
+
       // TODO(adamk): We should construct this scope from the ScopeInfo.
       Scope* scope =
           NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
@@ -1113,8 +1129,8 @@
         checkpoint.Restore(&formals.materialized_literals_count);
         // Pass `accept_IN=true` to ParseArrowFunctionLiteral --- this should
         // not be observable, or else the preparser would have failed.
-        Expression* expression =
-            ParseArrowFunctionLiteral(true, formals, formals_classifier, &ok);
+        Expression* expression = ParseArrowFunctionLiteral(
+            true, formals, is_async, formals_classifier, &ok);
         if (ok) {
           // Scanning must end at the same position that was recorded
           // previously. If not, parsing has been interrupted due to a stack
@@ -1253,18 +1269,15 @@
   // StatementListItem:
   //    Statement
   //    Declaration
-
-  switch (peek()) {
+  const Token::Value peeked = peek();
+  switch (peeked) {
     case Token::FUNCTION:
-      return ParseFunctionDeclaration(NULL, ok);
+      return ParseHoistableDeclaration(NULL, ok);
     case Token::CLASS:
       Consume(Token::CLASS);
       return ParseClassDeclaration(NULL, ok);
     case Token::CONST:
-      if (allow_const()) {
-        return ParseVariableStatement(kStatementListItem, NULL, ok);
-      }
-      break;
+      return ParseVariableStatement(kStatementListItem, NULL, ok);
     case Token::VAR:
       return ParseVariableStatement(kStatementListItem, NULL, ok);
     case Token::LET:
@@ -1272,6 +1285,13 @@
         return ParseVariableStatement(kStatementListItem, NULL, ok);
       }
       break;
+    case Token::ASYNC:
+      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+          !scanner()->HasAnyLineTerminatorAfterNext()) {
+        Consume(Token::ASYNC);
+        return ParseAsyncFunctionDeclaration(NULL, ok);
+      }
+    /* falls through */
     default:
       break;
   }
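
The Token::ASYNC case above only treats `async` as the start of an async function declaration when the very next token is `function` and no line terminator sits between them (the spec's [no LineTerminator here] restriction). A toy model of that one-token-lookahead decision, with hypothetical Token/Scanned types:

// Illustrative model (not V8 code) of the async-function lookahead.
#include <cstddef>
#include <cstdio>
#include <vector>

enum class Token { ASYNC, FUNCTION, IDENTIFIER, EOS };

struct Scanned {
  Token token;
  bool newline_before;  // a line terminator precedes this token
};

bool StartsAsyncFunction(const std::vector<Scanned>& tokens, std::size_t pos) {
  return pos + 1 < tokens.size() && tokens[pos].token == Token::ASYNC &&
         tokens[pos + 1].token == Token::FUNCTION &&
         !tokens[pos + 1].newline_before;  // "async \n function" is rejected
}

int main() {
  std::vector<Scanned> one_line = {{Token::ASYNC, false},
                                   {Token::FUNCTION, false}};
  std::vector<Scanned> asi = {{Token::ASYNC, false}, {Token::FUNCTION, true}};
  std::printf("%d %d\n", StartsAsyncFunction(one_line, 0),
              StartsAsyncFunction(asi, 0));  // 1 0
}
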
@@ -1306,7 +1326,6 @@
   //    ModuleItem*
 
   DCHECK(scope_->is_module_scope());
-  RaiseLanguageMode(STRICT);
 
   while (peek() != Token::EOS) {
     Statement* stat = ParseModuleItem(CHECK_OK);
@@ -1367,7 +1386,7 @@
     // Keep track of the first reserved word encountered in case our
     // caller needs to report an error.
     if (!reserved_loc->IsValid() &&
-        !Token::IsIdentifier(name_tok, STRICT, false)) {
+        !Token::IsIdentifier(name_tok, STRICT, false, parsing_module_)) {
       *reserved_loc = scanner()->location();
     }
     const AstRawString* local_name = ParseIdentifierName(CHECK_OK);
@@ -1418,7 +1437,8 @@
     if (CheckContextualKeyword(CStrVector("as"))) {
       local_name = ParseIdentifierName(CHECK_OK);
     }
-    if (!Token::IsIdentifier(scanner()->current_token(), STRICT, false)) {
+    if (!Token::IsIdentifier(scanner()->current_token(), STRICT, false,
+                             parsing_module_)) {
       *ok = false;
       ReportMessage(MessageTemplate::kUnexpectedReserved);
       return NULL;
@@ -1427,7 +1447,7 @@
       ReportMessage(MessageTemplate::kStrictEvalArguments);
       return NULL;
     }
-    VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
+    VariableProxy* proxy = NewUnresolved(local_name, CONST);
     ImportDeclaration* declaration =
         factory()->NewImportDeclaration(proxy, import_name, NULL, scope_, pos);
     Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
@@ -1475,7 +1495,7 @@
   if (tok != Token::MUL && tok != Token::LBRACE) {
     const AstRawString* local_name =
         ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
-    VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
+    VariableProxy* proxy = NewUnresolved(local_name, CONST);
     import_default_declaration = factory()->NewImportDeclaration(
         proxy, ast_value_factory()->default_string(), NULL, scope_, pos);
     Declare(import_default_declaration, DeclarationDescriptor::NORMAL, true,
@@ -1561,7 +1581,10 @@
             pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
         result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
       } else {
-        result = ParseFunctionDeclaration(pos, is_generator, &names, CHECK_OK);
+        result = ParseHoistableDeclaration(
+            pos, is_generator ? ParseFunctionFlags::kIsGenerator
+                              : ParseFunctionFlags::kIsNormal,
+            &names, CHECK_OK);
       }
       break;
     }
@@ -1580,6 +1603,30 @@
       }
       break;
 
+    case Token::ASYNC:
+      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+          !scanner()->HasAnyLineTerminatorAfterNext()) {
+        Consume(Token::ASYNC);
+        Consume(Token::FUNCTION);
+        int pos = position();
+        if (peek() == Token::LPAREN) {
+          // AsyncFunctionDeclaration[+Default] ::
+          //   async [no LineTerminator here] function ( FormalParameters ) {
+          //      AsyncFunctionBody
+          //   }
+          default_export = ParseFunctionLiteral(
+              default_string, Scanner::Location::invalid(),
+              kSkipFunctionNameCheck, FunctionKind::kAsyncFunction, pos,
+              FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
+          result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+        } else {
+          result = ParseHoistableDeclaration(pos, ParseFunctionFlags::kIsAsync,
+                                             &names, CHECK_OK);
+        }
+        break;
+      }
+    /* falls through */
+
     default: {
       int pos = peek_position();
       ExpressionClassifier classifier(this);
@@ -1690,7 +1737,7 @@
     }
 
     case Token::FUNCTION:
-      result = ParseFunctionDeclaration(&names, CHECK_OK);
+      result = ParseHoistableDeclaration(&names, CHECK_OK);
       break;
 
     case Token::CLASS:
@@ -1704,6 +1751,14 @@
       result = ParseVariableStatement(kStatementListItem, &names, CHECK_OK);
       break;
 
+    case Token::ASYNC:
+      if (allow_harmony_async_await()) {
+        Consume(Token::ASYNC);
+        result = ParseAsyncFunctionDeclaration(&names, CHECK_OK);
+        break;
+      }
+    /* falls through */
+
     default:
       *ok = false;
       ReportUnexpectedToken(scanner()->current_token());
@@ -1881,6 +1936,7 @@
   DCHECK(proxy->raw_name() != NULL);
   const AstRawString* name = proxy->raw_name();
   VariableMode mode = declaration->mode();
+  DCHECK(IsDeclaredVariableMode(mode) && mode != CONST_LEGACY);
   bool is_function_declaration = declaration->IsFunctionDeclaration();
   if (scope == nullptr) scope = scope_;
   Scope* declaration_scope =
@@ -1912,11 +1968,8 @@
       }
       var = declaration_scope->DeclareLocal(
           name, mode, declaration->initialization(), kind, kNotAssigned);
-    } else if ((IsLexicalVariableMode(mode) ||
-                IsLexicalVariableMode(var->mode())) &&
-               // Lexical bindings may appear for some parameters in sloppy
-               // mode even with --harmony-sloppy off.
-               (is_strict(language_mode()) || allow_harmony_sloppy())) {
+    } else if (IsLexicalVariableMode(mode) ||
+               IsLexicalVariableMode(var->mode())) {
       // Allow duplicate function decls for web compat, see bug 4693.
       if (is_sloppy(language_mode()) && is_function_declaration &&
           var->is_function()) {
@@ -1986,14 +2039,6 @@
   // RuntimeHidden_DeclareLookupSlot calls.
   declaration_scope->AddDeclaration(declaration);
 
-  if (mode == CONST_LEGACY && declaration_scope->is_script_scope()) {
-    // For global const variables we bind the proxy to a variable.
-    DCHECK(resolve);  // should be set by all callers
-    Variable::Kind kind = Variable::NORMAL;
-    var = new (zone()) Variable(declaration_scope, name, mode, kind,
-                                kNeedsInitialization, kNotAssigned);
-  }
-
   // If requested and we have a local variable, bind the proxy to the variable
   // at parse-time. This is used for functions (and consts) declared inside
   // statements: the corresponding function (or const) variable must be in the
@@ -2069,17 +2114,33 @@
 }
 
 
-Statement* Parser::ParseFunctionDeclaration(
+Statement* Parser::ParseHoistableDeclaration(
     ZoneList<const AstRawString*>* names, bool* ok) {
   Expect(Token::FUNCTION, CHECK_OK);
   int pos = position();
-  bool is_generator = Check(Token::MUL);
-  return ParseFunctionDeclaration(pos, is_generator, names, ok);
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+  if (Check(Token::MUL)) {
+    flags |= ParseFunctionFlags::kIsGenerator;
+  }
+  return ParseHoistableDeclaration(pos, flags, names, ok);
 }
 
+Statement* Parser::ParseAsyncFunctionDeclaration(
+    ZoneList<const AstRawString*>* names, bool* ok) {
+  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+  int pos = position();
+  if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+    *ok = false;
+    ReportUnexpectedToken(scanner()->current_token());
+    return nullptr;
+  }
+  Expect(Token::FUNCTION, CHECK_OK);
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
+  return ParseHoistableDeclaration(pos, flags, names, ok);
+}
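
ParseHoistableDeclaration now takes a ParseFunctionFlags value that is built up with |= and tested with &, as seen above and in the hunk that follows. A minimal sketch of such a flags enum, assuming the plain-enum-plus-operator pattern (the exact V8 definition lives elsewhere):

#include <cstdio>

// kIsGenerator and kIsAsync are mutually exclusive in the parser (see the
// DCHECK in ParseHoistableDeclaration), but either combines with kIsNormal.
enum ParseFunctionFlags {
  kIsNormal = 0,
  kIsGenerator = 1 << 0,
  kIsAsync = 1 << 1,
};

inline ParseFunctionFlags& operator|=(ParseFunctionFlags& lhs,
                                      ParseFunctionFlags rhs) {
  lhs = static_cast<ParseFunctionFlags>(static_cast<int>(lhs) |
                                        static_cast<int>(rhs));
  return lhs;
}

int main() {
  ParseFunctionFlags flags = kIsNormal;
  flags |= kIsGenerator;  // as in: if (Check(Token::MUL)) flags |= ...
  const bool is_generator = (flags & kIsGenerator) != 0;
  const bool is_async = (flags & kIsAsync) != 0;
  std::printf("generator=%d async=%d\n", is_generator, is_async);  // 1 0
}
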
 
-Statement* Parser::ParseFunctionDeclaration(
-    int pos, bool is_generator, ZoneList<const AstRawString*>* names,
+Statement* Parser::ParseHoistableDeclaration(
+    int pos, ParseFunctionFlags flags, ZoneList<const AstRawString*>* names,
     bool* ok) {
   // FunctionDeclaration ::
   //   'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
@@ -2087,10 +2148,21 @@
   //   'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
   //
   // 'function' and '*' (if present) have been consumed by the caller.
+  const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
+  const bool is_async = flags & ParseFunctionFlags::kIsAsync;
+  DCHECK(!is_generator || !is_async);
+
   bool is_strict_reserved = false;
   const AstRawString* name = ParseIdentifierOrStrictReservedWord(
       &is_strict_reserved, CHECK_OK);
 
+  if (V8_UNLIKELY(is_async_function() && this->IsAwait(name))) {
+    ReportMessageAt(scanner()->location(),
+                    MessageTemplate::kAwaitBindingIdentifier);
+    *ok = false;
+    return nullptr;
+  }
+
   FuncNameInferrer::State fni_state(fni_);
   if (fni_ != NULL) fni_->PushEnclosingName(name);
   FunctionLiteral* fun = ParseFunctionLiteral(
@@ -2098,7 +2170,8 @@
       is_strict_reserved ? kFunctionNameIsStrictReserved
                          : kFunctionNameValidityUnknown,
       is_generator ? FunctionKind::kGeneratorFunction
-                   : FunctionKind::kNormalFunction,
+                   : is_async ? FunctionKind::kAsyncFunction
+                              : FunctionKind::kNormalFunction,
       pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
 
   // Even if we're not at the top-level of the global or a function
@@ -2107,18 +2180,15 @@
   // In ES6, a function behaves as a lexical binding, except in
   // a script scope, or the initial scope of eval or another function.
   VariableMode mode =
-      (is_strict(language_mode()) || allow_harmony_sloppy_function()) &&
-      !scope_->is_declaration_scope()
-          ? LET
-          : VAR;
+      (!scope_->is_declaration_scope() || scope_->is_module_scope()) ? LET
+                                                                     : VAR;
   VariableProxy* proxy = NewUnresolved(name, mode);
   Declaration* declaration =
       factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
   Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
   if (names) names->Add(name, zone());
   EmptyStatement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
-  if (is_sloppy(language_mode()) && allow_harmony_sloppy_function() &&
-      !scope_->is_declaration_scope()) {
+  if (is_sloppy(language_mode()) && !scope_->is_declaration_scope()) {
     SloppyBlockFunctionStatement* delegate =
         factory()->NewSloppyBlockFunctionStatement(empty, scope_);
     scope_->DeclarationScope()->sloppy_block_function_map()->Declare(name,
@@ -2146,12 +2216,6 @@
   //
   // so rewrite it as such.
 
-  if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
-    ReportMessage(MessageTemplate::kSloppyLexical);
-    *ok = false;
-    return NULL;
-  }
-
   int pos = position();
   bool is_strict_reserved = false;
   const AstRawString* name =
@@ -2282,12 +2346,11 @@
 
   if (peek() == Token::VAR) {
     Consume(Token::VAR);
-  } else if (peek() == Token::CONST && allow_const()) {
+  } else if (peek() == Token::CONST) {
     Consume(Token::CONST);
-    DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
     DCHECK(var_context != kStatement);
     parsing_result->descriptor.mode = CONST;
-  } else if (peek() == Token::LET && allow_let()) {
+  } else if (peek() == Token::LET) {
     Consume(Token::LET);
     DCHECK(var_context != kStatement);
     parsing_result->descriptor.mode = LET;
@@ -2370,9 +2433,8 @@
           return nullptr;
         }
 
-        // 'let x' and (legacy) 'const x' initialize 'x' to undefined.
-        if (parsing_result->descriptor.mode == LET ||
-            parsing_result->descriptor.mode == CONST_LEGACY) {
+        // 'let x' initializes 'x' to undefined.
+        if (parsing_result->descriptor.mode == LET) {
           value = GetLiteralUndefined(position());
         }
       }
@@ -2419,6 +2481,23 @@
   return false;
 }
 
+Statement* Parser::ParseFunctionDeclaration(bool* ok) {
+  Consume(Token::FUNCTION);
+  int pos = position();
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+  if (Check(Token::MUL)) {
+    flags |= ParseFunctionFlags::kIsGenerator;
+    if (allow_harmony_restrictive_declarations()) {
+      ParserTraits::ReportMessageAt(scanner()->location(),
+                                    MessageTemplate::kGeneratorInLegacyContext);
+      *ok = false;
+      return nullptr;
+    }
+  }
+
+  return ParseHoistableDeclaration(pos, flags, nullptr, CHECK_OK);
+}
+
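
The new ParseFunctionDeclaration above uses the parser's bool* ok / CHECK_OK convention throughout: every step reports failure through *ok, and the caller bails out immediately. A hand-expanded model of that propagation pattern (ParseDigit/ParseSum are invented stand-ins):

#include <cstdio>

int ParseDigit(char c, bool* ok) {
  if (c < '0' || c > '9') {
    *ok = false;  // report failure to the caller
    return 0;
  }
  return c - '0';
}

int ParseSum(char a, char b, bool* ok) {
  int lhs = ParseDigit(a, ok);
  if (!*ok) return 0;  // what CHECK_OK expands to, modeled by hand
  int rhs = ParseDigit(b, ok);
  if (!*ok) return 0;
  return lhs + rhs;
}

int main() {
  bool ok = true;
  int sum = ParseSum('3', '4', &ok);
  std::printf("ok=%d sum=%d\n", ok, sum);  // ok=1 sum=7
}
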
 Statement* Parser::ParseExpressionOrLabelledStatement(
     ZoneList<const AstRawString*>* labels,
     AllowLabelledFunctionStatement allow_function, bool* ok) {
@@ -2475,7 +2554,7 @@
     // ES#sec-labelled-function-declarations Labelled Function Declarations
     if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
       if (allow_function == kAllowLabelledFunctionStatement) {
-        return ParseFunctionDeclaration(labels, ok);
+        return ParseFunctionDeclaration(ok);
       } else {
         return ParseScopedStatement(labels, true, ok);
       }
@@ -2496,15 +2575,6 @@
   }
 
   // Parsed expression statement, followed by semicolon.
-  // Detect attempts at 'let' declarations in sloppy mode.
-  if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
-      expr->AsVariableProxy() != NULL &&
-      expr->AsVariableProxy()->raw_name() ==
-          ast_value_factory()->let_string()) {
-    ReportMessage(MessageTemplate::kSloppyLexical, NULL);
-    *ok = false;
-    return NULL;
-  }
   ExpectSemicolon(CHECK_OK);
   return factory()->NewExpressionStatement(expr, pos);
 }
@@ -2624,9 +2694,13 @@
     }
   } else {
     int pos = peek_position();
-    return_value = ParseExpression(true, CHECK_OK);
 
     if (IsSubclassConstructor(function_state_->kind())) {
+      // Because of the return-value rewriting that happens for subclass
+      // constructors, we don't want to accept tail calls here; therefore we
+      // don't set ReturnExprScope to kInsideValidReturnStatement.
+      return_value = ParseExpression(true, CHECK_OK);
+
       // For subclass constructors we need to return `this` in case of an
       // undefined return value, and a Smi (transformed into an exception in
       // the ConstructStub) for a non-object.
@@ -2665,17 +2739,23 @@
       return_value = factory()->NewConditional(
           is_undefined, ThisExpression(scope_, factory(), pos),
           is_object_conditional, pos);
-    }
+    } else {
+      ReturnExprScope maybe_allow_tail_calls(
+          function_state_, ReturnExprContext::kInsideValidReturnStatement);
+      return_value = ParseExpression(true, CHECK_OK);
 
-    // ES6 14.6.1 Static Semantics: IsInTailPosition
-    if (allow_tailcalls() && !is_sloppy(language_mode())) {
-      function_state_->AddExpressionInTailPosition(return_value);
+      if (allow_tailcalls() && !is_sloppy(language_mode())) {
+        // ES6 14.6.1 Static Semantics: IsInTailPosition
+        function_state_->AddImplicitTailCallExpression(return_value);
+      }
     }
   }
   ExpectSemicolon(CHECK_OK);
 
   if (is_generator()) {
     return_value = BuildIteratorResult(return_value, true);
+  } else if (is_async_function()) {
+    return_value = BuildPromiseResolve(return_value, return_value->position());
   }
 
   result = factory()->NewReturnStatement(return_value, loc.beg_pos);
@@ -2849,40 +2929,6 @@
       factory()->NewThrow(exception, pos), pos);
 }
 
-class Parser::DontCollectExpressionsInTailPositionScope {
- public:
-  DontCollectExpressionsInTailPositionScope(
-      Parser::FunctionState* function_state)
-      : function_state_(function_state),
-        old_value_(function_state->collect_expressions_in_tail_position()) {
-    function_state->set_collect_expressions_in_tail_position(false);
-  }
-  ~DontCollectExpressionsInTailPositionScope() {
-    function_state_->set_collect_expressions_in_tail_position(old_value_);
-  }
-
- private:
-  Parser::FunctionState* function_state_;
-  bool old_value_;
-};
-
-// Collects all return expressions at tail call position in this scope
-// to a separate list.
-class Parser::CollectExpressionsInTailPositionToListScope {
- public:
-  CollectExpressionsInTailPositionToListScope(
-      Parser::FunctionState* function_state, List<Expression*>* list)
-      : function_state_(function_state), list_(list) {
-    function_state->expressions_in_tail_position().Swap(list_);
-  }
-  ~CollectExpressionsInTailPositionToListScope() {
-    function_state_->expressions_in_tail_position().Swap(list_);
-  }
-
- private:
-  Parser::FunctionState* function_state_;
-  List<Expression*>* list_;
-};
 
 TryStatement* Parser::ParseTryStatement(bool* ok) {
   // TryStatement ::
@@ -2901,7 +2947,8 @@
 
   Block* try_block;
   {
-    DontCollectExpressionsInTailPositionScope no_tail_calls(function_state_);
+    ReturnExprScope no_tail_calls(function_state_,
+                                  ReturnExprContext::kInsideTryBlock);
     try_block = ParseBlock(NULL, CHECK_OK);
   }
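
ReturnExprScope replaces the two ad-hoc scope classes deleted above with one RAII guard: it swaps in a new return-expression context on construction and restores the previous one when the scope ends. A self-contained sketch of that idea (FunctionState here is a toy, not V8's class; the context names mirror those used in this diff):

#include <cassert>

enum class ReturnExprContext {
  kInsideValidBlock,
  kInsideTryBlock,
  kInsideForInOfBody,
  kInsideValidReturnStatement,
};

struct FunctionState {
  ReturnExprContext return_expr_context = ReturnExprContext::kInsideValidBlock;
};

class ReturnExprScope {
 public:
  ReturnExprScope(FunctionState* state, ReturnExprContext context)
      : state_(state), saved_(state->return_expr_context) {
    state->return_expr_context = context;
  }
  ~ReturnExprScope() { state_->return_expr_context = saved_; }

 private:
  FunctionState* state_;
  ReturnExprContext saved_;
};

int main() {
  FunctionState state;
  {
    ReturnExprScope no_tail_calls(&state, ReturnExprContext::kInsideTryBlock);
    assert(state.return_expr_context == ReturnExprContext::kInsideTryBlock);
  }
  // Restored automatically at the end of the block.
  assert(state.return_expr_context == ReturnExprContext::kInsideValidBlock);
}
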
 
@@ -2915,7 +2962,7 @@
   Scope* catch_scope = NULL;
   Variable* catch_variable = NULL;
   Block* catch_block = NULL;
-  List<Expression*> expressions_in_tail_position_in_catch_block;
+  TailCallExpressionList tail_call_expressions_in_catch_block(zone());
   if (tok == Token::CATCH) {
     Consume(Token::CATCH);
 
@@ -2942,8 +2989,8 @@
 
     {
       CollectExpressionsInTailPositionToListScope
-          collect_expressions_in_tail_position_scope(
-              function_state_, &expressions_in_tail_position_in_catch_block);
+          collect_tail_call_expressions_scope(
+              function_state_, &tail_call_expressions_in_catch_block);
       BlockState block_state(&scope_, catch_scope);
 
       // TODO(adamk): Make a version of ParseBlock that takes a scope and
@@ -2967,8 +3014,11 @@
           descriptor.declaration_pos = pattern->position();
           descriptor.initialization_pos = pattern->position();
 
+          // Initializer position for variables declared by the pattern.
+          const int initializer_position = position();
+
           DeclarationParsingResult::Declaration decl(
-              pattern, pattern->position(),
+              pattern, initializer_position,
               factory()->NewVariableProxy(catch_variable));
 
           Block* init_block =
@@ -3023,14 +3073,23 @@
   if (catch_block != NULL) {
     // For a try-catch construct append return expressions from the catch block
     // to the list of return expressions.
-    function_state_->expressions_in_tail_position().AddAll(
-        expressions_in_tail_position_in_catch_block);
+    function_state_->tail_call_expressions().Append(
+        tail_call_expressions_in_catch_block);
 
     DCHECK(finally_block == NULL);
     DCHECK(catch_scope != NULL && catch_variable != NULL);
     result = factory()->NewTryCatchStatement(try_block, catch_scope,
                                              catch_variable, catch_block, pos);
   } else {
+    if (FLAG_harmony_explicit_tailcalls &&
+        tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
+      // TODO(ishell): update chapter number.
+      // ES8 XX.YY.ZZ
+      ReportMessageAt(tail_call_expressions_in_catch_block.location(),
+                      MessageTemplate::kUnexpectedTailCallInCatchBlock);
+      *ok = false;
+      return NULL;
+    }
     DCHECK(finally_block != NULL);
     result = factory()->NewTryFinallyStatement(try_block, finally_block, pos);
   }
@@ -3125,11 +3184,10 @@
 
 void Parser::InitializeForEachStatement(ForEachStatement* stmt,
                                         Expression* each, Expression* subject,
-                                        Statement* body) {
+                                        Statement* body, int each_keyword_pos) {
   ForOfStatement* for_of = stmt->AsForOfStatement();
   if (for_of != NULL) {
-    InitializeForOfStatement(for_of, each, subject, body,
-                             RelocInfo::kNoPosition);
+    InitializeForOfStatement(for_of, each, subject, body, each_keyword_pos);
   } else {
     if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
       Variable* temp =
@@ -3148,13 +3206,13 @@
       body = block;
       each = factory()->NewVariableProxy(temp);
     }
-    stmt->Initialize(each, subject, body);
+    stmt->AsForInStatement()->Initialize(each, subject, body);
   }
 }
 
 void Parser::InitializeForOfStatement(ForOfStatement* for_of, Expression* each,
                                       Expression* iterable, Statement* body,
-                                      int iterable_pos) {
+                                      int next_result_pos) {
   Variable* iterator =
       scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
   Variable* result =
@@ -3165,14 +3223,7 @@
   Expression* result_done;
   Expression* assign_each;
 
-  // Hackily disambiguate o from o.next and o [Symbol.iterator]().
-  // TODO(verwaest): Come up with a better solution.
-  int get_iterator_pos = iterable_pos != RelocInfo::kNoPosition
-                             ? iterable_pos
-                             : iterable->position() - 2;
-  int next_result_pos = iterable_pos != RelocInfo::kNoPosition
-                            ? iterable_pos
-                            : iterable->position() - 1;
+  int get_iterator_pos = iterable->position();
 
   // iterator = iterable[Symbol.iterator]()
   assign_iterator = factory()->NewAssignment(
@@ -3212,8 +3263,8 @@
     }
   }
 
-  for_of->Initialize(each, iterable, body, iterator, assign_iterator,
-                     next_result, result_done, assign_each);
+  for_of->Initialize(body, iterator, assign_iterator, next_result, result_done,
+                     assign_each);
 }
 
 Statement* Parser::DesugarLexicalBindingsInForStatement(
@@ -3476,9 +3527,10 @@
     // Make a block around the statement in case a lexical binding
     // is introduced by a FunctionDeclaration.
     Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
+    body_scope->set_start_position(scanner()->location().beg_pos);
     BlockState block_state(&scope_, body_scope);
     Block* block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
-    Statement* body = ParseFunctionDeclaration(NULL, CHECK_OK);
+    Statement* body = ParseFunctionDeclaration(CHECK_OK);
     block->statements()->Add(body, zone());
     body_scope->set_end_position(scanner()->location().end_pos);
     body_scope = body_scope->FinalizeBlockScope();
@@ -3500,10 +3552,10 @@
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   for_scope->set_start_position(scanner()->location().beg_pos);
-  bool is_let_identifier_expression = false;
+  for_scope->set_is_hidden();
   DeclarationParsingResult parsing_result;
   if (peek() != Token::SEMICOLON) {
-    if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
+    if (peek() == Token::VAR || peek() == Token::CONST ||
         (peek() == Token::LET && IsNextLetKeyword())) {
       ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
                                 CHECK_OK);
@@ -3527,7 +3579,12 @@
         if (parsing_result.first_initializer_loc.IsValid() &&
             (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
              IsLexicalVariableMode(parsing_result.descriptor.mode) ||
-             !decl.pattern->IsVariableProxy())) {
+             !decl.pattern->IsVariableProxy() || allow_harmony_for_in())) {
+          // Only increment the use count if we would have let this through
+          // without the flag.
+          if (allow_harmony_for_in()) {
+            ++use_counts_[v8::Isolate::kForInInitializer];
+          }
           ParserTraits::ReportMessageAt(
               parsing_result.first_initializer_loc,
               MessageTemplate::kForInOfLoopInitializer,
@@ -3541,6 +3598,7 @@
         // special case for legacy for (var/const x =.... in)
         if (!IsLexicalVariableMode(parsing_result.descriptor.mode) &&
             decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+          DCHECK(!allow_harmony_for_in());
           ++use_counts_[v8::Isolate::kForInInitializer];
           const AstRawString* name =
               decl.pattern->AsVariableProxy()->raw_name();
@@ -3579,6 +3637,8 @@
             factory()->NewForEachStatement(mode, labels, stmt_pos);
         Target target(&this->target_stack_, loop);
 
+        int each_keyword_position = scanner()->location().beg_pos;
+
         Expression* enumerable;
         if (mode == ForEachStatement::ITERATE) {
           ExpressionClassifier classifier(this);
@@ -3597,8 +3657,8 @@
             factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
 
         {
-          DontCollectExpressionsInTailPositionScope no_tail_calls(
-              function_state_);
+          ReturnExprScope no_tail_calls(function_state_,
+                                        ReturnExprContext::kInsideForInOfBody);
           BlockState block_state(&scope_, body_scope);
 
           Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
@@ -3622,7 +3682,8 @@
           body_block->statements()->Add(body, zone());
           VariableProxy* temp_proxy =
               factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
-          InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
+          InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
+                                     each_keyword_position);
         }
         body_scope->set_end_position(scanner()->location().end_pos);
         body_scope = body_scope->FinalizeBlockScope();
@@ -3678,10 +3739,6 @@
       Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
       int lhs_end_pos = scanner()->location().end_pos;
       ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
-      is_let_identifier_expression =
-          expression->IsVariableProxy() &&
-          expression->AsVariableProxy()->raw_name() ==
-              ast_value_factory()->let_string();
 
       bool is_for_each = CheckInOrOf(&mode, ok);
       if (!*ok) return nullptr;
@@ -3705,6 +3762,8 @@
             factory()->NewForEachStatement(mode, labels, stmt_pos);
         Target target(&this->target_stack_, loop);
 
+        int each_keyword_position = scanner()->location().beg_pos;
+
         Expression* enumerable;
         if (mode == ForEachStatement::ITERATE) {
           ExpressionClassifier classifier(this);
@@ -3719,7 +3778,8 @@
         // For legacy compat reasons, give for loops similar treatment to
         // if statements in allowing a function declaration as the body.
         Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
-        InitializeForEachStatement(loop, expression, enumerable, body);
+        InitializeForEachStatement(loop, expression, enumerable, body,
+                                   each_keyword_position);
 
         Statement* final_loop = loop->IsForOfStatement()
             ? FinalizeForOfStatement(
@@ -3742,13 +3802,6 @@
   Target target(&this->target_stack_, loop);
 
   // Parsed initializer at this point.
-  // Detect attempts at 'let' declarations in sloppy mode.
-  if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
-      is_sloppy(language_mode()) && is_let_identifier_expression) {
-    ReportMessage(MessageTemplate::kSloppyLexical, NULL);
-    *ok = false;
-    return NULL;
-  }
   Expect(Token::SEMICOLON, CHECK_OK);
 
   Expression* cond = NULL;
@@ -3882,16 +3935,9 @@
   return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
 }
 
-
 void ParserTraits::ParseArrowFunctionFormalParameters(
-    ParserFormalParameters* parameters, Expression* expr,
-    const Scanner::Location& params_loc, bool* ok) {
-  if (parameters->Arity() >= Code::kMaxArguments) {
-    ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
-    *ok = false;
-    return;
-  }
-
+    ParserFormalParameters* parameters, Expression* expr, int end_pos,
+    bool* ok) {
   // ArrowFunctionFormals ::
   //    Binary(Token::COMMA, NonTailArrowFunctionFormals, Tail)
   //    Tail
@@ -3912,7 +3958,8 @@
     DCHECK_EQ(binop->op(), Token::COMMA);
     Expression* left = binop->left();
     Expression* right = binop->right();
-    ParseArrowFunctionFormalParameters(parameters, left, params_loc, ok);
+    int comma_pos = binop->position();
+    ParseArrowFunctionFormalParameters(parameters, left, comma_pos, ok);
     if (!*ok) return;
     // LHS of comma expression should be unparenthesized.
     expr = right;
@@ -3949,13 +3996,66 @@
                                      parser_->scope_, parameters->scope);
   }
 
-  // TODO(adamk): params_loc.end_pos is not the correct initializer position,
-  // but it should be conservative enough to trigger hole checks for variables
-  // referenced in the initializer (if any).
-  AddFormalParameter(parameters, expr, initializer, params_loc.end_pos,
-                     is_rest);
+  AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
 }
 
+void ParserTraits::ParseAsyncArrowSingleExpressionBody(
+    ZoneList<Statement*>* body, bool accept_IN,
+    Type::ExpressionClassifier* classifier, int pos, bool* ok) {
+  parser_->DesugarAsyncFunctionBody(
+      parser_->ast_value_factory()->empty_string(), parser_->scope_, body,
+      classifier, kAsyncArrowFunction, FunctionBody::SingleExpression,
+      accept_IN, pos, ok);
+}
+
+void Parser::DesugarAsyncFunctionBody(const AstRawString* function_name,
+                                      Scope* scope, ZoneList<Statement*>* body,
+                                      ExpressionClassifier* classifier,
+                                      FunctionKind kind, FunctionBody body_type,
+                                      bool accept_IN, int pos, bool* ok) {
+  // function async_function() {
+  //   .generator_object = %CreateGeneratorObject();
+  //   try {
+  //     ... function body ...
+  //   } catch (e) {
+  //     return Promise.reject(e);
+  //   }
+  // }
+  scope->ForceContextAllocation();
+  Variable* temp =
+      scope_->NewTemporary(ast_value_factory()->dot_generator_object_string());
+  function_state_->set_generator_object_variable(temp);
+
+  Expression* init_generator_variable = factory()->NewAssignment(
+      Token::INIT, factory()->NewVariableProxy(temp),
+      BuildCreateJSGeneratorObject(pos, kind), RelocInfo::kNoPosition);
+  body->Add(factory()->NewExpressionStatement(init_generator_variable,
+                                              RelocInfo::kNoPosition),
+            zone());
+
+  Block* try_block = factory()->NewBlock(NULL, 8, true, RelocInfo::kNoPosition);
+
+  ZoneList<Statement*>* inner_body = try_block->statements();
+
+  Expression* return_value = nullptr;
+  if (body_type == FunctionBody::Normal) {
+    ParseStatementList(inner_body, Token::RBRACE, ok);
+    if (!*ok) return;
+    return_value = factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
+  } else {
+    return_value = ParseAssignmentExpression(accept_IN, classifier, ok);
+    if (!*ok) return;
+    ParserTraits::RewriteNonPattern(classifier, ok);
+    if (!*ok) return;
+  }
+
+  return_value = BuildPromiseResolve(return_value, return_value->position());
+  inner_body->Add(
+      factory()->NewReturnStatement(return_value, return_value->position()),
+      zone());
+  body->Add(BuildRejectPromiseOnException(try_block), zone());
+  scope->set_end_position(scanner()->location().end_pos);
+}
 
 DoExpression* Parser::ParseDoExpression(bool* ok) {
   // AssignmentExpression ::
@@ -3982,9 +4082,15 @@
     Scanner::Location* duplicate_loc, bool* ok) {
   if (expr->IsEmptyParentheses()) return;
 
-  ParseArrowFunctionFormalParameters(parameters, expr, params_loc, ok);
+  ParseArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos, ok);
   if (!*ok) return;
 
+  if (parameters->Arity() > Code::kMaxArguments) {
+    ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
+    *ok = false;
+    return;
+  }
+
   Type::ExpressionClassifier classifier(parser_);
   if (!parameters->is_simple) {
     classifier.RecordNonSimpleParameter();
@@ -4044,53 +4150,17 @@
     function_name = ast_value_factory()->empty_string();
   }
 
-  // Function declarations are function scoped in normal mode, so they are
-  // hoisted. In harmony block scoping mode they are block scoped, so they
-  // are not hoisted.
-  //
-  // One tricky case are function declarations in a local sloppy-mode eval:
-  // their declaration is hoisted, but they still see the local scope. E.g.,
-  //
-  // function() {
-  //   var x = 0
-  //   try { throw 1 } catch (x) { eval("function g() { return x }") }
-  //   return g()
-  // }
-  //
-  // needs to return 1. To distinguish such cases, we need to detect
-  // (1) whether a function stems from a sloppy eval, and
-  // (2) whether it actually hoists across the eval.
-  // Unfortunately, we do not represent sloppy eval scopes, so we do not have
-  // either information available directly, especially not when lazily compiling
-  // a function like 'g'. We hence rely on the following invariants:
-  // - (1) is the case iff the innermost scope of the deserialized scope chain
-  //   under which we compile is _not_ a declaration scope. This holds because
-  //   in all normal cases, function declarations are fully hoisted to a
-  //   declaration scope and compiled relative to that.
-  // - (2) is the case iff the current declaration scope is still the original
-  //   one relative to the deserialized scope chain. Otherwise we must be
-  //   compiling a function in an inner declaration scope in the eval, e.g. a
-  //   nested function, and hoisting works normally relative to that.
-  Scope* declaration_scope = scope_->DeclarationScope();
-  Scope* original_declaration_scope = original_scope_->DeclarationScope();
-  Scope* scope = function_type == FunctionLiteral::kDeclaration &&
-                         is_sloppy(language_mode) &&
-                         !allow_harmony_sloppy_function() &&
-                         (original_scope_ == original_declaration_scope ||
-                          declaration_scope != original_declaration_scope)
-                     ? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
-                     : NewScope(scope_, FUNCTION_SCOPE, kind);
+  Scope* scope = NewScope(scope_, FUNCTION_SCOPE, kind);
   SetLanguageMode(scope, language_mode);
   ZoneList<Statement*>* body = NULL;
   int arity = -1;
   int materialized_literal_count = -1;
   int expected_property_count = -1;
   DuplicateFinder duplicate_finder(scanner()->unicode_cache());
-  FunctionLiteral::EagerCompileHint eager_compile_hint =
-      parenthesized_function_ ? FunctionLiteral::kShouldEagerCompile
-                              : FunctionLiteral::kShouldLazyCompile;
   bool should_be_used_once_hint = false;
   bool has_duplicate_parameters;
+  FunctionLiteral::EagerCompileHint eager_compile_hint;
+
   // Parse function.
   {
     AstNodeFactory function_factory(ast_value_factory());
@@ -4099,6 +4169,10 @@
     scope_->SetScopeName(function_name);
     ExpressionClassifier formals_classifier(this, &duplicate_finder);
 
+    eager_compile_hint = function_state_->this_function_is_parenthesized()
+                             ? FunctionLiteral::kShouldEagerCompile
+                             : FunctionLiteral::kShouldLazyCompile;
+
     if (is_generator) {
       // For generators, allocating variables in contexts is currently a win
       // because it minimizes the work needed to suspend and resume an
@@ -4126,7 +4200,6 @@
     CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
                            formals_end_position, CHECK_OK);
     Expect(Token::LBRACE, CHECK_OK);
-
     // Don't include the rest parameter into the function's formal parameter
     // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
     // which says whether we need to create an arguments adaptor frame).
@@ -4167,8 +4240,7 @@
     // logic where only top-level functions will be parsed lazily.
     bool is_lazily_parsed = mode() == PARSE_LAZILY &&
                             scope_->AllowsLazyParsing() &&
-                            !parenthesized_function_;
-    parenthesized_function_ = false;  // The bit was set for this function only.
+                            !function_state_->this_function_is_parenthesized();
 
     // Eager or lazy parse?
     // If is_lazily_parsed, we'll parse lazy. If we can set a bookmark, we'll
@@ -4211,10 +4283,13 @@
       //   FunctionExpression; even without enclosing parentheses it might be
       //   immediately invoked.
       // - The function literal shouldn't be hinted to eagerly compile.
+      // - For asm.js functions the body needs to be available when module
+      //   validation is active, because we examine the entire module at once.
       bool use_temp_zone =
           FLAG_lazy && !allow_natives() && extension_ == NULL && allow_lazy() &&
           function_type == FunctionLiteral::kDeclaration &&
-          eager_compile_hint != FunctionLiteral::kShouldEagerCompile;
+          eager_compile_hint != FunctionLiteral::kShouldEagerCompile &&
+          !(FLAG_validate_asm && scope->asm_function());
       // Open a new BodyScope, which sets our AstNodeFactory to allocate in the
       // new temporary zone if the preconditions are satisfied, and ensures that
       // the previous zone is always restored after parsing the body.
@@ -4253,8 +4328,10 @@
     if (is_strict(language_mode)) {
       CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
                               CHECK_OK);
+      CheckDecimalLiteralWithLeadingZero(use_counts_, scope->start_position(),
+                                         scope->end_position());
     }
-    if (is_sloppy(language_mode) && allow_harmony_sloppy_function()) {
+    if (is_sloppy(language_mode)) {
       InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
     }
     CheckConflictingVarDeclarations(scope, CHECK_OK);
@@ -4283,6 +4360,36 @@
   return function_literal;
 }
 
+Expression* Parser::ParseAsyncFunctionExpression(bool* ok) {
+  // AsyncFunctionExpression ::
+  //   async [no LineTerminator here] function ( FormalParameters[Await] )
+  //       { AsyncFunctionBody }
+  //
+  //   async [no LineTerminator here] function BindingIdentifier[Await]
+  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
+  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+  int pos = position();
+  Expect(Token::FUNCTION, CHECK_OK);
+  bool is_strict_reserved = false;
+  const AstRawString* name = nullptr;
+  FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
+
+  if (peek_any_identifier()) {
+    type = FunctionLiteral::kNamedExpression;
+    name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+    if (this->IsAwait(name)) {
+      ReportMessageAt(scanner()->location(),
+                      MessageTemplate::kAwaitBindingIdentifier);
+      *ok = false;
+      return nullptr;
+    }
+  }
+  return ParseFunctionLiteral(name, scanner()->location(),
+                              is_strict_reserved ? kFunctionNameIsStrictReserved
+                                                 : kFunctionNameValidityUnknown,
+                              FunctionKind::kAsyncFunction, pos, type,
+                              language_mode(), CHECK_OK);
+}
 
 void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
                                   int* expected_property_count, bool* ok,
@@ -4473,12 +4580,18 @@
     if (!parameter.is_simple() && scope_->calls_sloppy_eval()) {
       param_scope = NewScope(scope_, BLOCK_SCOPE);
       param_scope->set_is_declaration_scope();
-      param_scope->set_start_position(parameter.pattern->position());
-      param_scope->set_end_position(RelocInfo::kNoPosition);
+      param_scope->set_start_position(descriptor.initialization_pos);
+      param_scope->set_end_position(parameter.initializer_end_position);
       param_scope->RecordEvalCall();
       param_block = factory()->NewBlock(NULL, 8, true, RelocInfo::kNoPosition);
       param_block->set_scope(param_scope);
       descriptor.hoist_scope = scope_;
+      // Pass the appropriate scope in so that PatternRewriter can rewrite
+      // the inner initializers of the pattern to use param_scope.
+      descriptor.scope = param_scope;
+      // Rewrite the outer initializer to point to param_scope.
+      RewriteParameterInitializerScope(stack_limit(), initial_value, scope_,
+                                       param_scope);
     }
 
     {
@@ -4500,6 +4613,57 @@
   return init_block;
 }
 
+Block* Parser::BuildRejectPromiseOnException(Block* block) {
+  // try { <block> } catch (error) { return Promise.reject(error); }
+  Block* try_block = block;
+  Scope* catch_scope = NewScope(scope_, CATCH_SCOPE);
+  catch_scope->set_is_hidden();
+  Variable* catch_variable =
+      catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
+                                kCreatedInitialized, Variable::NORMAL);
+  Block* catch_block =
+      factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
+
+  Expression* promise_reject = BuildPromiseReject(
+      factory()->NewVariableProxy(catch_variable), RelocInfo::kNoPosition);
+
+  ReturnStatement* return_promise_reject =
+      factory()->NewReturnStatement(promise_reject, RelocInfo::kNoPosition);
+  catch_block->statements()->Add(return_promise_reject, zone());
+  TryStatement* try_catch_statement =
+      factory()->NewTryCatchStatement(try_block, catch_scope, catch_variable,
+                                      catch_block, RelocInfo::kNoPosition);
+
+  block = factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
+  block->statements()->Add(try_catch_statement, zone());
+  return block;
+}
+
+Expression* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
+  DCHECK_NOT_NULL(function_state_->generator_object_variable());
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+  args->Add(factory()->NewThisFunction(pos), zone());
+  args->Add(IsArrowFunction(kind)
+                ? GetLiteralUndefined(pos)
+                : ThisExpression(scope_, factory(), RelocInfo::kNoPosition),
+            zone());
+  return factory()->NewCallRuntime(Runtime::kCreateJSGeneratorObject, args,
+                                   pos);
+}
+
+Expression* Parser::BuildPromiseResolve(Expression* value, int pos) {
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+  args->Add(value, zone());
+  return factory()->NewCallRuntime(Context::PROMISE_CREATE_RESOLVED_INDEX, args,
+                                   pos);
+}
+
+Expression* Parser::BuildPromiseReject(Expression* value, int pos) {
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+  args->Add(value, zone());
+  return factory()->NewCallRuntime(Context::PROMISE_CREATE_REJECTED_INDEX, args,
+                                   pos);
+}
 
 ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
     const AstRawString* function_name, int pos,
@@ -4553,10 +4717,7 @@
           factory()->NewBlock(nullptr, 3, false, RelocInfo::kNoPosition);
 
       {
-        ZoneList<Expression*>* arguments =
-            new (zone()) ZoneList<Expression*>(0, zone());
-        CallRuntime* allocation = factory()->NewCallRuntime(
-            Runtime::kCreateJSGeneratorObject, arguments, pos);
+        Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
         VariableProxy* init_proxy = factory()->NewVariableProxy(
             function_state_->generator_object_variable());
         Assignment* assignment = factory()->NewAssignment(
@@ -4592,6 +4753,10 @@
       body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
                                                   RelocInfo::kNoPosition),
                 zone());
+    } else if (IsAsyncFunction(kind)) {
+      const bool accept_IN = true;
+      DesugarAsyncFunctionBody(function_name, inner_scope, body, nullptr, kind,
+                               FunctionBody::Normal, accept_IN, pos, CHECK_OK);
     } else {
       ParseStatementList(body, Token::RBRACE, CHECK_OK);
     }
@@ -4613,6 +4778,11 @@
     DCHECK_EQ(body, inner_block->statements());
     SetLanguageMode(scope_, inner_scope->language_mode());
     Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
+
+    if (IsAsyncFunction(kind)) {
+      init_block = BuildRejectPromiseOnException(init_block);
+    }
+
     DCHECK_NOT_NULL(init_block);
 
     inner_scope->set_end_position(scanner()->location().end_pos);
@@ -4650,13 +4820,7 @@
                     RelocInfo::kNoPosition));
   }
 
-  // ES6 14.6.1 Static Semantics: IsInTailPosition
-  // Mark collected return expressions that are in tail call position.
-  const List<Expression*>& expressions_in_tail_position =
-      function_state_->expressions_in_tail_position();
-  for (int i = 0; i < expressions_in_tail_position.length(); ++i) {
-    MarkTailPosition(expressions_in_tail_position[i]);
-  }
+  MarkCollectedTailCallExpressions();
   return result;
 }
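
The MarkCollectedTailCallExpressions() call above replaces the inline loop removed in this hunk: return expressions are collected while the body is parsed and marked afterwards. A toy model of collect-then-mark (Expression/FunctionState are illustrative stand-ins):

#include <cstdio>
#include <vector>

struct Expression {
  bool in_tail_position = false;
};

struct FunctionState {
  std::vector<Expression*> tail_call_expressions;
};

// After parsing, every collected return expression is marked as being in
// tail position (ES6 14.6.1 Static Semantics: IsInTailPosition).
void MarkCollectedTailCallExpressions(FunctionState* state) {
  for (Expression* expr : state->tail_call_expressions) {
    expr->in_tail_position = true;
  }
}

int main() {
  Expression a, b;
  FunctionState state;
  state.tail_call_expressions = {&a, &b};
  MarkCollectedTailCallExpressions(&state);
  std::printf("%d %d\n", a.in_tail_position, b.in_tail_position);  // 1 1
}
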
 
@@ -4678,19 +4842,18 @@
     reusable_preparser_->set_allow_lazy(true);
 #define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
     SET_ALLOW(natives);
-    SET_ALLOW(harmony_sloppy);
-    SET_ALLOW(harmony_sloppy_function);
-    SET_ALLOW(harmony_sloppy_let);
     SET_ALLOW(harmony_do_expressions);
+    SET_ALLOW(harmony_for_in);
     SET_ALLOW(harmony_function_name);
     SET_ALLOW(harmony_function_sent);
     SET_ALLOW(harmony_exponentiation_operator);
     SET_ALLOW(harmony_restrictive_declarations);
+    SET_ALLOW(harmony_async_await);
 #undef SET_ALLOW
   }
   PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
       language_mode(), function_state_->kind(), scope_->has_simple_parameters(),
-      logger, bookmark);
+      parsing_module_, logger, bookmark, use_counts_);
   if (pre_parse_timer_ != NULL) {
     pre_parse_timer_->Stop();
   }
@@ -4733,6 +4896,7 @@
     block_scope->set_start_position(scanner()->location().end_pos);
     ExpressionClassifier extends_classifier(this);
     extends = ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+    CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
     RewriteNonPattern(&extends_classifier, CHECK_OK);
     if (classifier != nullptr) {
       classifier->Accumulate(&extends_classifier,
@@ -4755,13 +4919,12 @@
     if (Check(Token::SEMICOLON)) continue;
     FuncNameInferrer::State fni_state(fni_);
     const bool in_class = true;
-    const bool is_static = false;
     bool is_computed_name = false;  // Classes do not care about computed
                                     // property names here.
     ExpressionClassifier property_classifier(this);
     const AstRawString* property_name = nullptr;
     ObjectLiteral::Property* property = ParsePropertyDefinition(
-        &checker, in_class, has_extends, is_static, &is_computed_name,
+        &checker, in_class, has_extends, MethodKind::Normal, &is_computed_name,
         &has_seen_constructor, &property_classifier, &property_name, CHECK_OK);
     RewriteNonPattern(&property_classifier, CHECK_OK);
     if (classifier != nullptr) {
@@ -5379,6 +5542,29 @@
   SetLanguageMode(scope_, old > mode ? old : mode);
 }
 
+void Parser::MarkCollectedTailCallExpressions() {
+  const ZoneList<Expression*>& tail_call_expressions =
+      function_state_->tail_call_expressions().expressions();
+  for (int i = 0; i < tail_call_expressions.length(); ++i) {
+    Expression* expression = tail_call_expressions[i];
+    // If only FLAG_harmony_explicit_tailcalls is enabled, then the
+    // expression must be a Call expression.
+    DCHECK(FLAG_harmony_tailcalls || !FLAG_harmony_explicit_tailcalls ||
+           expression->IsCall());
+    MarkTailPosition(expression);
+  }
+}
+
+Expression* ParserTraits::ExpressionListToExpression(
+    ZoneList<Expression*>* args) {
+  AstNodeFactory* factory = parser_->factory();
+  Expression* expr = args->at(0);
+  for (int i = 1; i < args->length(); ++i) {
+    expr = factory->NewBinaryOperation(Token::COMMA, expr, args->at(i),
+                                       expr->position());
+  }
+  return expr;
+}
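
ExpressionListToExpression above folds an argument list into one left-nested comma expression. A toy model of the same fold, using strings instead of AST nodes (Expr/CommaFold are invented for illustration):

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct Expr {
  std::string text;
};

// args = [a, b, c] becomes ((a, b), c), matching how the real function
// folds left with NewBinaryOperation(Token::COMMA, ...).
Expr CommaFold(const std::vector<Expr>& args) {
  Expr expr = args.at(0);
  for (std::size_t i = 1; i < args.size(); ++i) {
    expr = Expr{"(" + expr.text + ", " + args[i].text + ")"};
  }
  return expr;
}

int main() {
  std::vector<Expr> args = {{"a"}, {"b"}, {"c"}};
  std::printf("%s\n", CommaFold(args).text.c_str());  // ((a, b), c)
}
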
 
 void ParserTraits::RewriteDestructuringAssignments() {
   parser_->RewriteDestructuringAssignments();
@@ -5400,6 +5586,30 @@
   parser_->RewriteNonPattern(classifier, ok);
 }
 
+Expression* ParserTraits::RewriteAwaitExpression(Expression* value, int pos) {
+  // yield %AsyncFunctionAwait(.generator_object, <operand>)
+  Variable* generator_object_variable =
+      parser_->function_state_->generator_object_variable();
+
+  // If generator_object_variable is null, no generator object is available
+  // to suspend on, so return the await operand unchanged.
+  if (!generator_object_variable) return value;
+
+  Expression* generator_object =
+      parser_->factory()->NewVariableProxy(generator_object_variable);
+
+  ZoneList<Expression*>* async_function_await_args =
+      new (zone()) ZoneList<Expression*>(2, zone());
+  async_function_await_args->Add(generator_object, zone());
+  async_function_await_args->Add(value, zone());
+  Expression* async_function_await = parser_->factory()->NewCallRuntime(
+      Context::ASYNC_FUNCTION_AWAIT_INDEX, async_function_await_args,
+      RelocInfo::kNoPosition);
+
+  generator_object =
+      parser_->factory()->NewVariableProxy(generator_object_variable);
+  return parser_->factory()->NewYield(generator_object, async_function_await,
+                                      pos);
+}
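
The rewrite above turns `await <operand>` into a yield of a runtime call on the generator object, per the comment at the top of the function. A string-level toy of that transformation (RewriteAwait is a hypothetical helper, shown only to make the output shape concrete):

#include <cstdio>
#include <string>

// The operand is wrapped in a runtime call that registers the continuation
// on the generator object, and the whole call is then yielded.
std::string RewriteAwait(const std::string& operand) {
  return "yield %AsyncFunctionAwait(.generator_object, " + operand + ")";
}

int main() {
  std::printf("%s\n", RewriteAwait("promise").c_str());
  // yield %AsyncFunctionAwait(.generator_object, promise)
}
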
 
 Zone* ParserTraits::zone() const {
   return parser_->function_state_->scope()->zone();
@@ -5606,7 +5816,7 @@
           ForEachStatement::ITERATE, nullptr, RelocInfo::kNoPosition);
       InitializeForOfStatement(loop->AsForOfStatement(),
                                factory()->NewVariableProxy(each), subject,
-                               append_body, spread->expression_position());
+                               append_body);
       do_block->statements()->Add(loop, zone());
     }
   }
@@ -5749,15 +5959,19 @@
 //       }
 //     }
 //
-//     output.value;
+//     if (mode === kReturn) {
+//       return {value: output.value, done: true};
+//     }
+//     output.value
 //   }
 //
 // IteratorClose(iterator) expands to the following:
 //
 //   let iteratorReturn = iterator.return;
-//   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return;
-//   let output = %_Call(iteratorReturn, iterator);
-//   if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+//   if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+//     let output = %_Call(iteratorReturn, iterator);
+//     if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+//   }
 //
 // IteratorClose(iterator, input, output) expands to the following:
 //
@@ -5766,7 +5980,6 @@
 //   output = %_Call(iteratorReturn, iterator, input);
 //   if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
 
-
 Expression* ParserTraits::RewriteYieldStar(
     Expression* generator, Expression* iterable, int pos) {
 
@@ -5798,7 +6011,7 @@
   Statement* initialize_mode;
   {
     Expression* mode_proxy = factory->NewVariableProxy(var_mode);
-    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::kNext, nopos);
     Expression* assignment =
         factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
     initialize_mode = factory->NewExpressionStatement(assignment, nopos);
@@ -5929,9 +6142,9 @@
     Statement* throw_call = factory->NewExpressionStatement(call, nopos);
 
     Block* then = factory->NewBlock(nullptr, 4+1, false, nopos);
-    Variable* var_tmp = scope->NewTemporary(avfactory->empty_string());
-    BuildIteratorClose(then->statements(), var_iterator, Nothing<Variable*>(),
-                       var_tmp);
+    parser_->BuildIteratorCloseForCompletion(
+        then->statements(), var_iterator,
+        factory->NewSmiLiteral(Parser::kNormalCompletion, nopos));
     then->statements()->Add(throw_call, zone);
     check_throw = factory->NewIfStatement(
         condition, then, factory->NewEmptyStatement(nopos), nopos);
@@ -5996,7 +6209,7 @@
   {
     Expression* mode_proxy = factory->NewVariableProxy(var_mode);
     Expression* kreturn =
-        factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+        factory->NewSmiLiteral(JSGeneratorObject::kReturn, nopos);
     Expression* assignment =
         factory->NewAssignment(Token::ASSIGN, mode_proxy, kreturn, nopos);
     set_mode_return = factory->NewExpressionStatement(assignment, nopos);
@@ -6015,7 +6228,7 @@
   Statement* set_mode_next;
   {
     Expression* mode_proxy = factory->NewVariableProxy(var_mode);
-    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::kNext, nopos);
     Expression* assignment =
         factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
     set_mode_next = factory->NewExpressionStatement(assignment, nopos);
@@ -6027,7 +6240,7 @@
   {
     Expression* mode_proxy = factory->NewVariableProxy(var_mode);
     Expression* kthrow =
-        factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
+        factory->NewSmiLiteral(JSGeneratorObject::kThrow, nopos);
     Expression* assignment =
         factory->NewAssignment(Token::ASSIGN, mode_proxy, kthrow, nopos);
     set_mode_throw = factory->NewExpressionStatement(assignment, nopos);
@@ -6045,7 +6258,30 @@
   }
 
 
-  // output.value;
+  // if (mode === kReturn) {
+  //   return {value: output.value, done: true};
+  // }
+  Statement* maybe_return_value;
+  {
+    Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+    Expression* kreturn =
+        factory->NewSmiLiteral(JSGeneratorObject::kReturn, nopos);
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ_STRICT, mode_proxy, kreturn, nopos);
+
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Expression* literal =
+        factory->NewStringLiteral(avfactory->value_string(), nopos);
+    Expression* property = factory->NewProperty(output_proxy, literal, nopos);
+    Statement* return_value =
+        factory->NewReturnStatement(BuildIteratorResult(property, true), nopos);
+
+    maybe_return_value = factory->NewIfStatement(
+        condition, return_value, factory->NewEmptyStatement(nopos), nopos);
+  }
+
+
+  // output.value
   Statement* get_value;
   {
     Expression* output_proxy = factory->NewVariableProxy(var_output);
@@ -6070,6 +6306,7 @@
     catch_block->statements()->Add(set_mode_throw, zone);
 
     Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
+    catch_scope->set_is_hidden();
     const AstRawString* name = avfactory->dot_catch_string();
     Variable* catch_variable =
         catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
@@ -6104,7 +6341,7 @@
     case_next->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
 
     auto case_return = new (zone) ZoneList<Statement*>(5, zone);
-    BuildIteratorClose(case_return, var_iterator, Just(var_input), var_output);
+    BuildIteratorClose(case_return, var_iterator, var_input, var_output);
     case_return->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
 
     auto case_throw = new (zone) ZoneList<Statement*>(5, zone);
@@ -6115,11 +6352,11 @@
     case_throw->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
 
     auto cases = new (zone) ZoneList<CaseClause*>(3, zone);
-    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::kNext, nopos);
     Expression* kreturn =
-        factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+        factory->NewSmiLiteral(JSGeneratorObject::kReturn, nopos);
     Expression* kthrow =
-        factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
+        factory->NewSmiLiteral(JSGeneratorObject::kThrow, nopos);
     cases->Add(factory->NewCaseClause(knext, case_next, nopos), zone);
     cases->Add(factory->NewCaseClause(kreturn, case_return, nopos), zone);
     cases->Add(factory->NewCaseClause(kthrow, case_throw, nopos), zone);
@@ -6147,13 +6384,14 @@
     // The rewriter needs to process the get_value statement only, hence we
     // put the preceding statements into an init block.
 
-    Block* do_block_ = factory->NewBlock(nullptr, 6, true, nopos);
+    Block* do_block_ = factory->NewBlock(nullptr, 7, true, nopos);
     do_block_->statements()->Add(initialize_input, zone);
     do_block_->statements()->Add(initialize_mode, zone);
     do_block_->statements()->Add(initialize_output, zone);
     do_block_->statements()->Add(get_iterator, zone);
     do_block_->statements()->Add(validate_iterator, zone);
     do_block_->statements()->Add(loop, zone);
+    do_block_->statements()->Add(maybe_return_value, zone);
 
     Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
     do_block->statements()->Add(do_block_, zone);
@@ -6167,180 +6405,6 @@
   return yield_star;
 }
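+
+// For illustration, the kReturn path added above is what makes
+//
+//   function* inner() { yield 1; }
+//   function* outer() { yield* inner(); }
+//   let g = outer();
+//   g.next();       // {value: 1, done: false}
+//   g.return(42);   // {value: 42, done: true}
+//
+// return a proper iterator result from the delegating generator instead of
+// just evaluating output.value.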
 
-// Desugaring of (lhs) instanceof (rhs)
-// ====================================
-//
-// We desugar instanceof into a load of property @@hasInstance on the rhs.
-// We end up with roughly the following code (O, C):
-//
-//   do {
-//     let O = lhs;
-//     let C = rhs;
-//     if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
-//     let handler_result = C[Symbol.hasInstance];
-//     if (handler_result === undefined) {
-//       if (!IS_CALLABLE(C)) {
-//         throw MakeTypeError(kCalledNonCallableInstanceOf);
-//       }
-//       handler_result = %_GetOrdinaryHasInstance()
-//       handler_result = %_Call(handler_result, C, O);
-//     } else {
-//       handler_result = !!(%_Call(handler_result, C, O));
-//     }
-//     handler_result;
-//   }
-//
-Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
-                                            int pos) {
-  const int nopos = RelocInfo::kNoPosition;
-
-  auto factory = parser_->factory();
-  auto avfactory = parser_->ast_value_factory();
-  auto scope = parser_->scope_;
-  auto zone = parser_->zone();
-
-  // let O = lhs;
-  Variable* var_O = scope->NewTemporary(avfactory->empty_string());
-  Statement* get_O;
-  {
-    Expression* O_proxy = factory->NewVariableProxy(var_O);
-    Expression* assignment =
-        factory->NewAssignment(Token::ASSIGN, O_proxy, lhs, nopos);
-    get_O = factory->NewExpressionStatement(assignment, nopos);
-  }
-
-  // let C = lhs;
-  Variable* var_C = scope->NewTemporary(avfactory->empty_string());
-  Statement* get_C;
-  {
-    Expression* C_proxy = factory->NewVariableProxy(var_C);
-    Expression* assignment =
-        factory->NewAssignment(Token::ASSIGN, C_proxy, rhs, nopos);
-    get_C = factory->NewExpressionStatement(assignment, nopos);
-  }
-
-  // if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
-  Statement* validate_C;
-  {
-    auto args = new (zone) ZoneList<Expression*>(1, zone);
-    args->Add(factory->NewVariableProxy(var_C), zone);
-    Expression* is_receiver_call =
-        factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
-    Expression* call =
-        NewThrowTypeError(MessageTemplate::kNonObjectInInstanceOfCheck,
-                          avfactory->empty_string(), pos);
-    Statement* throw_call = factory->NewExpressionStatement(call, pos);
-
-    validate_C =
-        factory->NewIfStatement(is_receiver_call,
-                                factory->NewEmptyStatement(nopos),
-                                throw_call,
-                                nopos);
-  }
-
-  // let handler_result = C[Symbol.hasInstance];
-  Variable* var_handler_result = scope->NewTemporary(avfactory->empty_string());
-  Statement* initialize_handler;
-  {
-    Expression* hasInstance_symbol_literal =
-        factory->NewSymbolLiteral("hasInstance_symbol", RelocInfo::kNoPosition);
-    Expression* prop = factory->NewProperty(factory->NewVariableProxy(var_C),
-                                            hasInstance_symbol_literal, pos);
-    Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
-    Expression* assignment =
-        factory->NewAssignment(Token::ASSIGN, handler_proxy, prop, nopos);
-    initialize_handler = factory->NewExpressionStatement(assignment, nopos);
-  }
-
-  // if (handler_result === undefined) {
-  //   if (!IS_CALLABLE(C)) {
-  //     throw MakeTypeError(kCalledNonCallableInstanceOf);
-  //   }
-  //   handler_result = %_GetOrdinaryHasInstance()
-  //   handler_result = %_Call(handler_result, C, O);
-  // } else {
-  //   handler_result = !!%_Call(handler_result, C, O);
-  // }
-  Statement* call_handler;
-  {
-    Expression* condition = factory->NewCompareOperation(
-        Token::EQ_STRICT, factory->NewVariableProxy(var_handler_result),
-        factory->NewUndefinedLiteral(nopos), nopos);
-
-    Block* then_side = factory->NewBlock(nullptr, 3, false, nopos);
-    {
-      Expression* throw_expr =
-          NewThrowTypeError(MessageTemplate::kCalledNonCallableInstanceOf,
-                            avfactory->empty_string(), pos);
-      Statement* validate_C = CheckCallable(var_C, throw_expr, pos);
-
-      ZoneList<Expression*>* empty_args =
-          new (zone) ZoneList<Expression*>(0, zone);
-      Expression* ordinary_has_instance = factory->NewCallRuntime(
-          Runtime::kInlineGetOrdinaryHasInstance, empty_args, pos);
-      Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
-      Expression* assignment_handler = factory->NewAssignment(
-          Token::ASSIGN, handler_proxy, ordinary_has_instance, nopos);
-      Statement* assignment_get_handler =
-          factory->NewExpressionStatement(assignment_handler, nopos);
-
-      ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(3, zone);
-      args->Add(factory->NewVariableProxy(var_handler_result), zone);
-      args->Add(factory->NewVariableProxy(var_C), zone);
-      args->Add(factory->NewVariableProxy(var_O), zone);
-      Expression* call =
-          factory->NewCallRuntime(Runtime::kInlineCall, args, pos);
-      Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
-      Expression* assignment =
-          factory->NewAssignment(Token::ASSIGN, result_proxy, call, nopos);
-      Statement* assignment_return =
-          factory->NewExpressionStatement(assignment, nopos);
-
-      then_side->statements()->Add(validate_C, zone);
-      then_side->statements()->Add(assignment_get_handler, zone);
-      then_side->statements()->Add(assignment_return, zone);
-    }
-
-    Statement* else_side;
-    {
-      auto args = new (zone) ZoneList<Expression*>(3, zone);
-      args->Add(factory->NewVariableProxy(var_handler_result), zone);
-      args->Add(factory->NewVariableProxy(var_C), zone);
-      args->Add(factory->NewVariableProxy(var_O), zone);
-      Expression* call =
-          factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
-      Expression* inner_not =
-          factory->NewUnaryOperation(Token::NOT, call, nopos);
-      Expression* outer_not =
-          factory->NewUnaryOperation(Token::NOT, inner_not, nopos);
-      Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
-      Expression* assignment =
-          factory->NewAssignment(Token::ASSIGN, result_proxy, outer_not, nopos);
-
-      else_side = factory->NewExpressionStatement(assignment, nopos);
-    }
-    call_handler =
-        factory->NewIfStatement(condition, then_side, else_side, nopos);
-  }
-
-  // do { ... }
-  DoExpression* instanceof;
-  {
-    Block* block = factory->NewBlock(nullptr, 5, true, nopos);
-    block->statements()->Add(get_O, zone);
-    block->statements()->Add(get_C, zone);
-    block->statements()->Add(validate_C, zone);
-    block->statements()->Add(initialize_handler, zone);
-    block->statements()->Add(call_handler, zone);
-
-    // Here is the desugared instanceof.
-    instanceof = factory->NewDoExpression(block, var_handler_result, nopos);
-    Rewriter::Rewrite(parser_, instanceof, avfactory);
-  }
-
-  return instanceof;
-}
-
 Statement* ParserTraits::CheckCallable(Variable* var, Expression* error,
                                        int pos) {
   auto factory = parser_->factory();
@@ -6364,22 +6428,19 @@
 }
 
 void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
-                                      Variable* iterator,
-                                      Maybe<Variable*> input,
+                                      Variable* iterator, Variable* input,
                                       Variable* var_output) {
   //
   // This function adds four statements to [statements], corresponding to the
   // following code:
   //
   //   let iteratorReturn = iterator.return;
-  //   if (IS_NULL_OR_UNDEFINED(iteratorReturn) return |input|;
-  //   output = %_Call(iteratorReturn, iterator|, input|);
+  //   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+  //     return {value: input, done: true};
+  //   }
+  //   output = %_Call(iteratorReturn, iterator, input);
   //   if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
   //
-  // Here, |...| denotes optional parts, depending on the presence of the
-  // input variable.  The reason for allowing input is that BuildIteratorClose
-  // can then be reused to handle the return case in yield*.
-  //
 
   const int nopos = RelocInfo::kNoPosition;
   auto factory = parser_->factory();
@@ -6401,33 +6462,31 @@
     get_return = factory->NewExpressionStatement(assignment, nopos);
   }
 
-  // if (IS_NULL_OR_UNDEFINED(iteratorReturn) return |input|;
+  // if (IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+  //   return {value: input, done: true};
+  // }
   Statement* check_return;
   {
     Expression* condition = factory->NewCompareOperation(
         Token::EQ, factory->NewVariableProxy(var_return),
         factory->NewNullLiteral(nopos), nopos);
 
-    Expression* value = input.IsJust()
-                            ? static_cast<Expression*>(
-                                  factory->NewVariableProxy(input.FromJust()))
-                            : factory->NewUndefinedLiteral(nopos);
+    Expression* value = factory->NewVariableProxy(input);
 
-    Statement* return_input = factory->NewReturnStatement(value, nopos);
+    Statement* return_input =
+        factory->NewReturnStatement(BuildIteratorResult(value, true), nopos);
 
     check_return = factory->NewIfStatement(
         condition, return_input, factory->NewEmptyStatement(nopos), nopos);
   }
 
-  // output = %_Call(iteratorReturn, iterator, |input|);
+  // output = %_Call(iteratorReturn, iterator, input);
   Statement* call_return;
   {
     auto args = new (zone) ZoneList<Expression*>(3, zone);
     args->Add(factory->NewVariableProxy(var_return), zone);
     args->Add(factory->NewVariableProxy(iterator), zone);
-    if (input.IsJust()) {
-      args->Add(factory->NewVariableProxy(input.FromJust()), zone);
-    }
+    args->Add(factory->NewVariableProxy(input), zone);
 
     Expression* call =
         factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
@@ -6529,8 +6588,8 @@
   Block* maybe_close;
   {
     Block* block = factory->NewBlock(nullptr, 2, true, nopos);
-    parser_->BuildIteratorCloseForCompletion(block->statements(), iter,
-                                             completion);
+    Expression* proxy = factory->NewVariableProxy(completion);
+    parser_->BuildIteratorCloseForCompletion(block->statements(), iter, proxy);
     DCHECK(block->statements()->length() == 2);
 
     maybe_close = factory->NewBlock(nullptr, 1, true, nopos);
@@ -6551,6 +6610,7 @@
     Variable* catch_variable =
         catch_scope->DeclareLocal(avfactory->dot_catch_string(), VAR,
                                   kCreatedInitialized, Variable::NORMAL);
+    catch_scope->set_is_hidden();
 
     Statement* rethrow;
     // We use %ReThrow rather than the ordinary throw because we want to
@@ -6588,7 +6648,7 @@
 
 void ParserTraits::BuildIteratorCloseForCompletion(
     ZoneList<Statement*>* statements, Variable* iterator,
-    Variable* completion) {
+    Expression* completion) {
   //
   // This function adds two statements to [statements], corresponding to the
   // following code:
@@ -6662,6 +6722,7 @@
     Variable* catch_variable = catch_scope->DeclareLocal(
         avfactory->dot_catch_string(), VAR, kCreatedInitialized,
         Variable::NORMAL);
+    catch_scope->set_is_hidden();
 
     try_call_return = factory->NewTryCatchStatement(
         try_block, catch_scope, catch_variable, catch_block, nopos);
@@ -6722,7 +6783,7 @@
   Statement* call_return_carefully;
   {
     Expression* condition = factory->NewCompareOperation(
-        Token::EQ_STRICT, factory->NewVariableProxy(completion),
+        Token::EQ_STRICT, completion,
         factory->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
 
     Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
diff --git a/src/parsing/parser.h b/src/parsing/parser.h
index c82682e..174b983 100644
--- a/src/parsing/parser.h
+++ b/src/parsing/parser.h
@@ -125,7 +125,6 @@
   // TODO(titzer): these should not be part of ParseInfo.
   //--------------------------------------------------------------------------
   Isolate* isolate() { return isolate_; }
-  Handle<JSFunction> closure() { return closure_; }
   Handle<SharedFunctionInfo> shared_info() { return shared_; }
   Handle<Script> script() { return script_; }
   Handle<Context> context() { return context_; }
@@ -145,7 +144,6 @@
   }
 
   void ReopenHandlesInNewHandleScope() {
-    closure_ = Handle<JSFunction>(*closure_);
     shared_ = Handle<SharedFunctionInfo>(*shared_);
     script_ = Handle<Script>(*script_);
     context_ = Handle<Context>(*context_);
@@ -186,7 +184,6 @@
 
   // TODO(titzer): Move handles and isolate out of ParseInfo.
   Isolate* isolate_;
-  Handle<JSFunction> closure_;
   Handle<SharedFunctionInfo> shared_;
   Handle<Script> script_;
   Handle<Context> context_;
@@ -202,8 +199,6 @@
   void SetFlag(Flag f) { flags_ |= f; }
   void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
   bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
-
-  void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
 };
 
 class FunctionEntry BASE_EMBEDDED {
@@ -360,6 +355,7 @@
   bool IsArguments(const AstRawString* identifier) const;
   bool IsEvalOrArguments(const AstRawString* identifier) const;
   bool IsUndefined(const AstRawString* identifier) const;
+  bool IsAwait(const AstRawString* identifier) const;
   V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const;
 
   // Returns true if the expression is of type "this.foo".
@@ -376,6 +372,12 @@
     return expression->AsVariableProxy()->raw_name();
   }
 
+  bool IsDirectEvalCall(Expression* expression) {
+    if (!expression->IsCall()) return false;
+    expression = expression->AsCall()->expression();
+    return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
+  }
+
   static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
     return ObjectLiteral::IsBoilerplateProperty(property);
   }
@@ -533,7 +535,11 @@
 
   V8_INLINE void AddParameterInitializationBlock(
       const ParserFormalParameters& parameters,
-      ZoneList<v8::internal::Statement*>* body, bool* ok);
+      ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok);
+
+  void ParseAsyncArrowSingleExpressionBody(
+      ZoneList<Statement*>* body, bool accept_IN,
+      Type::ExpressionClassifier* classifier, int pos, bool* ok);
 
   V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type,
                             FunctionKind kind = kNormalFunction);
@@ -546,14 +552,15 @@
       Scope* scope, const ParserFormalParameters::Parameter& parameter,
       Type::ExpressionClassifier* classifier);
   void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
-                                          Expression* params,
-                                          const Scanner::Location& params_loc,
+                                          Expression* params, int end_pos,
                                           bool* ok);
   void ParseArrowFunctionFormalParameterList(
       ParserFormalParameters* parameters, Expression* params,
       const Scanner::Location& params_loc,
       Scanner::Location* duplicate_loc, bool* ok);
 
+  V8_INLINE Expression* ParseAsyncFunctionExpression(bool* ok);
+
   V8_INLINE DoExpression* ParseDoExpression(bool* ok);
 
   void ReindexLiterals(const ParserFormalParameters& parameters);
@@ -579,6 +586,7 @@
                                   bool name_is_strict_reserved, int pos,
                                   bool* ok);
 
+  V8_INLINE void MarkCollectedTailCallExpressions();
   V8_INLINE void MarkTailPosition(Expression* expression);
 
   V8_INLINE void CheckConflictingVarDeclarations(v8::internal::Scope* scope,
@@ -636,6 +644,8 @@
                                       ZoneList<v8::internal::Expression*>* args,
                                       int pos);
 
+  Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
+
   // Rewrite all DestructuringAssignments in the current FunctionState.
   V8_INLINE void RewriteDestructuringAssignments();
 
@@ -644,6 +654,8 @@
   V8_INLINE Expression* RewriteAssignExponentiation(Expression* left,
                                                     Expression* right, int pos);
 
+  V8_INLINE Expression* RewriteAwaitExpression(Expression* value, int pos);
+
   V8_INLINE void QueueDestructuringAssignmentForRewriting(
       Expression* assignment);
   V8_INLINE void QueueNonPatternForRewriting(Expression* expr);
@@ -665,16 +677,14 @@
   Expression* RewriteYieldStar(
       Expression* generator, Expression* expression, int pos);
 
-  Expression* RewriteInstanceof(Expression* lhs, Expression* rhs, int pos);
-
  private:
   Parser* parser_;
 
   void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
-                          Maybe<Variable*> input, Variable* output);
-  void BuildIteratorCloseForCompletion(
-      ZoneList<Statement*>* statements, Variable* iterator,
-      Variable* body_threw);
+                          Variable* input, Variable* output);
+  void BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+                                       Variable* iterator,
+                                       Expression* completion);
   Statement* CheckCallable(Variable* var, Expression* error, int pos);
 };
 
@@ -768,8 +778,15 @@
                                bool* ok);
   Statement* ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
                                    bool* ok);
-  Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
+  Statement* ParseFunctionDeclaration(bool* ok);
+  Statement* ParseHoistableDeclaration(ZoneList<const AstRawString*>* names,
                                       bool* ok);
+  Statement* ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
+                                       ZoneList<const AstRawString*>* names,
+                                       bool* ok);
+  Statement* ParseAsyncFunctionDeclaration(ZoneList<const AstRawString*>* names,
+                                           bool* ok);
+  Expression* ParseAsyncFunctionExpression(bool* ok);
   Statement* ParseFunctionDeclaration(int pos, bool is_generator,
                                       ZoneList<const AstRawString*>* names,
                                       bool* ok);
@@ -838,8 +855,6 @@
                                                       Assignment* assignment,
                                                       Scope* scope);
 
-    void set_initializer_position(int pos) { initializer_position_ = pos; }
-
    private:
     PatternRewriter() {}
 
@@ -880,6 +895,8 @@
     PatternContext SetAssignmentContextIfNeeded(Expression* node);
     PatternContext SetInitializerContextIfNeeded(Expression* node);
 
+    void RewriteParameterScopes(Expression* expr);
+
     Variable* CreateTempVar(Expression* value = nullptr);
 
     AstNodeFactory* factory() const { return parser_->factory(); }
@@ -927,8 +944,6 @@
   Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
   Statement* ParseThrowStatement(bool* ok);
   Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
-  class DontCollectExpressionsInTailPositionScope;
-  class CollectExpressionsInTailPositionToListScope;
   TryStatement* ParseTryStatement(bool* ok);
   DebuggerStatement* ParseDebuggerStatement(bool* ok);
   // Parse a SubStatement in strict mode, or with an extra block scope in
@@ -948,15 +963,22 @@
 
   // Initialize the components of a for-in / for-of statement.
   void InitializeForEachStatement(ForEachStatement* stmt, Expression* each,
-                                  Expression* subject, Statement* body);
+                                  Expression* subject, Statement* body,
+                                  int each_keyword_pos);
   void InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
                                 Expression* iterable, Statement* body,
-                                int iterable_pos);
+                                int next_result_pos = RelocInfo::kNoPosition);
   Statement* DesugarLexicalBindingsInForStatement(
       Scope* inner_scope, VariableMode mode,
       ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
       Expression* cond, Statement* next, Statement* body, bool* ok);
 
+  void DesugarAsyncFunctionBody(const AstRawString* function_name, Scope* scope,
+                                ZoneList<Statement*>* body,
+                                Type::ExpressionClassifier* classifier,
+                                FunctionKind kind, FunctionBody type,
+                                bool accept_IN, int pos, bool* ok);
+
   void RewriteDoExpression(Expression* expr, bool* ok);
 
   FunctionLiteral* ParseFunctionLiteral(
@@ -1027,6 +1049,7 @@
 
   Block* BuildParameterInitializationBlock(
       const ParserFormalParameters& parameters, bool* ok);
+  Block* BuildRejectPromiseOnException(Block* block);
 
   // Consumes the ending }.
   ZoneList<Statement*>* ParseEagerFunctionBody(
@@ -1054,6 +1077,8 @@
   void SetLanguageMode(Scope* scope, LanguageMode mode);
   void RaiseLanguageMode(LanguageMode mode);
 
+  V8_INLINE void MarkCollectedTailCallExpressions();
+
   V8_INLINE void RewriteDestructuringAssignments();
 
   V8_INLINE Expression* RewriteExponentiation(Expression* left,
@@ -1069,6 +1094,10 @@
   friend class InitializerRewriter;
   void RewriteParameterInitializer(Expression* expr, Scope* scope);
 
+  Expression* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
+  Expression* BuildPromiseResolve(Expression* value, int pos);
+  Expression* BuildPromiseReject(Expression* value, int pos);
+
   Scanner scanner_;
   PreParser* reusable_preparser_;
   Scope* original_scope_;  // for ES5 function declarations in sloppy eval
@@ -1238,20 +1267,27 @@
   }
 }
 
-
 void ParserTraits::AddParameterInitializationBlock(
     const ParserFormalParameters& parameters,
-    ZoneList<v8::internal::Statement*>* body, bool* ok) {
+    ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok) {
   if (!parameters.is_simple) {
     auto* init_block =
         parser_->BuildParameterInitializationBlock(parameters, ok);
     if (!*ok) return;
+
+    if (is_async) {
+      init_block = parser_->BuildRejectPromiseOnException(init_block);
+    }
+
     if (init_block != nullptr) {
       body->Add(init_block, parser_->zone());
     }
   }
 }
 
+Expression* ParserTraits::ParseAsyncFunctionExpression(bool* ok) {
+  return parser_->ParseAsyncFunctionExpression(ok);
+}
 
 DoExpression* ParserTraits::ParseDoExpression(bool* ok) {
   return parser_->ParseDoExpression(ok);
diff --git a/src/parsing/pattern-rewriter.cc b/src/parsing/pattern-rewriter.cc
index e699255..3dcff98 100644
--- a/src/parsing/pattern-rewriter.cc
+++ b/src/parsing/pattern-rewriter.cc
@@ -272,15 +272,9 @@
           factory()->NewExpressionStatement(initialize, initialize->position()),
           zone());
     }
-  } else if (value != nullptr && (descriptor_->mode == CONST_LEGACY ||
-                                  IsLexicalVariableMode(descriptor_->mode))) {
-    // Constant initializations always assign to the declared constant which
-    // is always at the function scope level. This is only relevant for
-    // dynamically looked-up variables and constants (the
-    // start context for constant lookups is always the function context,
-    // while it is the top context for var declared variables). Sigh...
-    // For 'let' and 'const' declared variables in harmony mode the
-    // initialization also always assigns to the declared variable.
+  } else if (value != nullptr && IsLexicalVariableMode(descriptor_->mode)) {
+    // For 'let' and 'const' declared variables the initialization always
+    // assigns to the declared variable.
     DCHECK_NOT_NULL(proxy);
     DCHECK_NOT_NULL(proxy->var());
     DCHECK_NOT_NULL(value);
@@ -387,6 +381,37 @@
   return set_context(old_context);
 }
 
+// Two cases require rewriting the scope of default parameter expressions:
+// - Eagerly parsed arrow functions are initially parsed with their default
+//   parameter expressions in the enclosing scope; once the arrow is
+//   encountered, those expressions need to be in the scope of the function.
+// - When an extra declaration scope has to be inserted to account for a
+//   sloppy eval in a default parameter or function body, the expressions
+//   need to be in that new inner scope, which was added after initial
+//   parsing.
+// Each of these cases can be handled by rewriting the contents of the
+// expression to the current scope. The source scope is typically the outer
+// scope when one case occurs; when both cases occur, both scopes need to
+// be included as the outer scope. (Both rewritings still need to be done
+// to account for lazily parsed arrow functions which hit the second case.)
+// TODO(littledan): Remove the outer_scope parameter of
+//                  RewriteParameterInitializerScope
+void Parser::PatternRewriter::RewriteParameterScopes(Expression* expr) {
+  if (!IsBindingContext()) return;
+  if (descriptor_->declaration_kind != DeclarationDescriptor::PARAMETER) return;
+  if (!scope()->is_arrow_scope() && !scope()->is_block_scope()) return;
+
+  // Either this scope is an arrow scope or a declaration block scope.
+  DCHECK(scope()->is_declaration_scope());
+
+  if (scope()->outer_scope()->is_arrow_scope() && scope()->is_block_scope()) {
+    RewriteParameterInitializerScope(parser_->stack_limit(), expr,
+                                     scope()->outer_scope()->outer_scope(),
+                                     scope());
+  }
+  RewriteParameterInitializerScope(parser_->stack_limit(), expr,
+                                   scope()->outer_scope(), scope());
+}
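+
+// For illustration, both cases can occur for the same parameter list, e.g.
+//
+//   (a = eval("0"), b = () => a) => { ... }
+//
+// where the defaults are first parsed in the enclosing scope (arrow case)
+// and the sloppy direct eval forces an extra declaration scope, so the
+// initializers have to be rewritten into the innermost scope.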
 
 void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
                                                  Variable** temp_var) {
@@ -396,6 +421,11 @@
 
   for (ObjectLiteralProperty* property : *pattern->properties()) {
     PatternContext context = SetInitializerContextIfNeeded(property->value());
+
+    // Computed property names contain expressions which might require
+    // scope rewriting.
+    if (!property->key()->IsLiteral()) RewriteParameterScopes(property->key());
+
     RecurseIntoSubpattern(
         property->value(),
         factory()->NewProperty(factory()->NewVariableProxy(temp),
@@ -552,11 +582,11 @@
 
     // let array = [];
     // while (!done) {
+    //   done = true;  // If .next, .done or .value throws, don't close.
     //   result = IteratorNext(iterator);
-    //   if (result.done) {
-    //     done = true;
-    //   } else {
+    //   if (!result.done) {
     //     %AppendElement(array, result.value);
+    //     done = false;
     //   }
     // }
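+    //
+    // For illustration, this loop implements rest elements in array
+    // destructuring, as in
+    //
+    //   const [first, ...rest] = iterable;
+    //
+    // Setting done = true up front means a throw from .next, .done or
+    // .value leaves done == true, so no IteratorClose is attempted on the
+    // failing iterator.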
 
@@ -571,12 +601,6 @@
           node->literal_index(), RelocInfo::kNoPosition));
     }
 
-    // result = IteratorNext(iterator);
-    Statement* get_next = factory()->NewExpressionStatement(
-        parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
-                                         result, nopos),
-        nopos);
-
     // done = true;
     Statement* set_done = factory()->NewExpressionStatement(
         factory()->NewAssignment(
@@ -584,6 +608,12 @@
             factory()->NewBooleanLiteral(true, nopos), nopos),
         nopos);
 
+    // result = IteratorNext(iterator);
+    Statement* get_next = factory()->NewExpressionStatement(
+        parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
+                                         result, nopos),
+        nopos);
+
     // %AppendElement(array, result.value);
     Statement* append_element;
     {
@@ -600,29 +630,44 @@
           nopos);
     }
 
-    // if (result.done) { #set_done } else { #append_element }
-    Statement* set_done_or_append;
+    // done = false;
+    Statement* unset_done = factory()->NewExpressionStatement(
+        factory()->NewAssignment(
+            Token::ASSIGN, factory()->NewVariableProxy(done),
+            factory()->NewBooleanLiteral(false, nopos), nopos),
+        nopos);
+
+    // if (!result.done) { #append_element; #unset_done }
+    Statement* maybe_append_and_unset_done;
     {
       Expression* result_done =
           factory()->NewProperty(factory()->NewVariableProxy(result),
                                  factory()->NewStringLiteral(
                                      ast_value_factory()->done_string(), nopos),
                                  nopos);
-      set_done_or_append = factory()->NewIfStatement(result_done, set_done,
-                                                     append_element, nopos);
+
+      Block* then = factory()->NewBlock(nullptr, 2, true, nopos);
+      then->statements()->Add(append_element, zone());
+      then->statements()->Add(unset_done, zone());
+
+      maybe_append_and_unset_done = factory()->NewIfStatement(
+          factory()->NewUnaryOperation(Token::NOT, result_done, nopos), then,
+          factory()->NewEmptyStatement(nopos), nopos);
     }
 
     // while (!done) {
+    //   #set_done;
     //   #get_next;
-    //   #set_done_or_append;
+    //   #maybe_append_and_unset_done;
     // }
     WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
     {
       Expression* condition = factory()->NewUnaryOperation(
           Token::NOT, factory()->NewVariableProxy(done), nopos);
-      Block* body = factory()->NewBlock(nullptr, 2, true, nopos);
+      Block* body = factory()->NewBlock(nullptr, 3, true, nopos);
+      body->statements()->Add(set_done, zone());
       body->statements()->Add(get_next, zone());
-      body->statements()->Add(set_done_or_append, zone());
+      body->statements()->Add(maybe_append_and_unset_done, zone());
       loop->Initialize(condition, body);
     }
 
@@ -668,12 +713,8 @@
                                       RelocInfo::kNoPosition);
   }
 
-  if (IsBindingContext() &&
-      descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
-      scope()->is_arrow_scope()) {
-    RewriteParameterInitializerScope(parser_->stack_limit(), initializer,
-                                     scope()->outer_scope(), scope());
-  }
+  // Initializer may have been parsed in the wrong scope.
+  RewriteParameterScopes(initializer);
 
   PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
   RecurseIntoSubpattern(node->target(), value);
diff --git a/src/parsing/preparser.cc b/src/parsing/preparser.cc
index da1c35b..0a091c6 100644
--- a/src/parsing/preparser.cc
+++ b/src/parsing/preparser.cc
@@ -12,8 +12,8 @@
 #include "src/hashmap.h"
 #include "src/list.h"
 #include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data.h"
 #include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparse-data.h"
 #include "src/parsing/preparser.h"
 #include "src/unicode.h"
 #include "src/utils.h"
@@ -38,8 +38,10 @@
 
 
 PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
-  if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) {
-    return PreParserIdentifier::FutureReserved();
+  if (scanner->current_token() == Token::ENUM) {
+    return PreParserIdentifier::Enum();
+  } else if (scanner->current_token() == Token::AWAIT) {
+    return PreParserIdentifier::Await();
   } else if (scanner->current_token() ==
              Token::FUTURE_STRICT_RESERVED_WORD) {
     return PreParserIdentifier::FutureStrictReserved();
@@ -49,6 +51,8 @@
     return PreParserIdentifier::Static();
   } else if (scanner->current_token() == Token::YIELD) {
     return PreParserIdentifier::Yield();
+  } else if (scanner->current_token() == Token::ASYNC) {
+    return PreParserIdentifier::Async();
   }
   if (scanner->UnescapedLiteralMatches("eval", 4)) {
     return PreParserIdentifier::Eval();
@@ -98,11 +102,13 @@
       function_token_position, type, language_mode, ok);
 }
 
-
 PreParser::PreParseResult PreParser::PreParseLazyFunction(
     LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
-    ParserRecorder* log, Scanner::BookmarkScope* bookmark) {
+    bool parsing_module, ParserRecorder* log, Scanner::BookmarkScope* bookmark,
+    int* use_counts) {
+  parsing_module_ = parsing_module;
   log_ = log;
+  use_counts_ = use_counts;
   // Lazy functions always have trivial outer scopes (no with/catch scopes).
   Scope* top_scope = NewScope(scope_, SCRIPT_SCOPE);
   PreParserFactory top_factory(NULL);
@@ -118,6 +124,7 @@
   bool ok = true;
   int start_position = peek_position();
   ParseLazyFunctionLiteralBody(&ok, bookmark);
+  use_counts_ = nullptr;
   if (bookmark && bookmark->HasBeenReset()) {
     // Do nothing, as we've just aborted scanning this function.
   } else if (stack_overflow()) {
@@ -129,6 +136,7 @@
     if (is_strict(scope_->language_mode())) {
       int end_pos = scanner()->location().end_pos;
       CheckStrictOctalLiteral(start_position, end_pos, &ok);
+      CheckDecimalLiteralWithLeadingZero(use_counts, start_position, end_pos);
       if (!ok) return kPreParseSuccess;
     }
   }
@@ -178,19 +186,23 @@
 
   switch (peek()) {
     case Token::FUNCTION:
-      return ParseFunctionDeclaration(ok);
+      return ParseHoistableDeclaration(ok);
     case Token::CLASS:
       return ParseClassDeclaration(ok);
     case Token::CONST:
-      if (allow_const()) {
-        return ParseVariableStatement(kStatementListItem, ok);
-      }
-      break;
+      return ParseVariableStatement(kStatementListItem, ok);
     case Token::LET:
       if (IsNextLetKeyword()) {
         return ParseVariableStatement(kStatementListItem, ok);
       }
       break;
+    case Token::ASYNC:
+      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+          !scanner()->HasAnyLineTerminatorAfterNext()) {
+        Consume(Token::ASYNC);
+        return ParseAsyncFunctionDeclaration(ok);
+      }
+    /* falls through */
     default:
       break;
   }
@@ -281,7 +293,9 @@
       (legacy && allow_harmony_restrictive_declarations())) {
     return ParseSubStatement(kDisallowLabelledFunctionStatement, ok);
   } else {
-    return ParseFunctionDeclaration(CHECK_OK);
+    Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
+    BlockState block_state(&scope_, body_scope);
+    return ParseFunctionDeclaration(ok);
   }
 }
 
@@ -377,37 +391,64 @@
   }
 }
 
+PreParser::Statement PreParser::ParseHoistableDeclaration(
+    int pos, ParseFunctionFlags flags, bool* ok) {
+  const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
+  const bool is_async = flags & ParseFunctionFlags::kIsAsync;
+  DCHECK(!is_generator || !is_async);
 
-PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
-  // FunctionDeclaration ::
-  //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
-  // GeneratorDeclaration ::
-  //   'function' '*' Identifier '(' FormalParameterListopt ')'
-  //      '{' FunctionBody '}'
-  Expect(Token::FUNCTION, CHECK_OK);
-  int pos = position();
-  bool is_generator = Check(Token::MUL);
   bool is_strict_reserved = false;
   Identifier name = ParseIdentifierOrStrictReservedWord(
       &is_strict_reserved, CHECK_OK);
+
+  if (V8_UNLIKELY(is_async_function() && this->IsAwait(name))) {
+    ReportMessageAt(scanner()->location(),
+                    MessageTemplate::kAwaitBindingIdentifier);
+    *ok = false;
+    return Statement::Default();
+  }
+
   ParseFunctionLiteral(name, scanner()->location(),
                        is_strict_reserved ? kFunctionNameIsStrictReserved
                                           : kFunctionNameValidityUnknown,
                        is_generator ? FunctionKind::kGeneratorFunction
-                                    : FunctionKind::kNormalFunction,
+                                    : is_async ? FunctionKind::kAsyncFunction
+                                               : FunctionKind::kNormalFunction,
                        pos, FunctionLiteral::kDeclaration, language_mode(),
                        CHECK_OK);
   return Statement::FunctionDeclaration();
 }
 
+PreParser::Statement PreParser::ParseAsyncFunctionDeclaration(bool* ok) {
+  // AsyncFunctionDeclaration ::
+  //   async [no LineTerminator here] function BindingIdentifier[Await]
+  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
+  DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+  int pos = position();
+  Expect(Token::FUNCTION, CHECK_OK);
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
+  return ParseHoistableDeclaration(pos, flags, ok);
+}
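+
+// For illustration, the [no LineTerminator here] restriction means that
+//
+//   async function f() {}
+//
+// is an async function declaration, whereas
+//
+//   async
+//   function f() {}
+//
+// is an expression statement referencing `async` (via ASI) followed by a
+// plain function declaration.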
+
+PreParser::Statement PreParser::ParseHoistableDeclaration(bool* ok) {
+  // FunctionDeclaration ::
+  //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
+  // GeneratorDeclaration ::
+  //   'function' '*' Identifier '(' FormalParameterListopt ')'
+  //      '{' FunctionBody '}'
+
+  Expect(Token::FUNCTION, CHECK_OK);
+  int pos = position();
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+  if (Check(Token::MUL)) {
+    flags |= ParseFunctionFlags::kIsGenerator;
+  }
+  return ParseHoistableDeclaration(pos, flags, ok);
+}
+
 
 PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
   Expect(Token::CLASS, CHECK_OK);
-  if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
-    ReportMessage(MessageTemplate::kSloppyLexical);
-    *ok = false;
-    return Statement::Default();
-  }
 
   int pos = position();
   bool is_strict_reserved = false;
@@ -423,10 +464,14 @@
   // Block ::
   //   '{' StatementList '}'
 
+  Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
   Expect(Token::LBRACE, CHECK_OK);
   Statement final = Statement::Default();
-  while (peek() != Token::RBRACE) {
-    final = ParseStatementListItem(CHECK_OK);
+  {
+    BlockState block_state(&scope_, block_scope);
+    while (peek() != Token::RBRACE) {
+      final = ParseStatementListItem(CHECK_OK);
+    }
   }
   Expect(Token::RBRACE, ok);
   return final;
@@ -473,7 +518,7 @@
   bool is_pattern = false;
   if (peek() == Token::VAR) {
     Consume(Token::VAR);
-  } else if (peek() == Token::CONST && allow_const()) {
+  } else if (peek() == Token::CONST) {
     // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
     //
     // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
@@ -485,12 +530,10 @@
     // existing pages. Therefore we keep allowing const with the old
     // non-harmony semantics in sloppy mode.
     Consume(Token::CONST);
-    if (is_strict(language_mode()) || allow_harmony_sloppy()) {
-      DCHECK(var_context != kStatement);
-      require_initializer = true;
-      lexical = true;
-    }
-  } else if (peek() == Token::LET && allow_let()) {
+    DCHECK(var_context != kStatement);
+    require_initializer = true;
+    lexical = true;
+  } else if (peek() == Token::LET) {
     Consume(Token::LET);
     DCHECK(var_context != kStatement);
     lexical = true;
@@ -556,6 +599,22 @@
   return Statement::Default();
 }
 
+PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
+  Consume(Token::FUNCTION);
+  int pos = position();
+  ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+  if (Check(Token::MUL)) {
+    flags |= ParseFunctionFlags::kIsGenerator;
+    if (allow_harmony_restrictive_declarations()) {
+      PreParserTraits::ReportMessageAt(
+          scanner()->location(), MessageTemplate::kGeneratorInLegacyContext);
+      *ok = false;
+      return Statement::Default();
+    }
+  }
+  return ParseHoistableDeclaration(pos, flags, ok);
+}
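+
+// For illustration, this handles functions in statement position, e.g.
+//
+//   if (x) function f() {}   // allowed as legacy sloppy-mode syntax
+//   if (x) function* g() {}  // kGeneratorInLegacyContext with
+//                            // --harmony-restrictive-declarations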
+
 PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
     AllowLabelledFunctionStatement allow_function, bool* ok) {
   // ExpressionStatement | LabelledStatement ::
@@ -586,7 +645,8 @@
   if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) {
     // Expression is a single identifier, and not, e.g., a parenthesized
     // identifier.
-    DCHECK(!expr.AsIdentifier().IsFutureReserved());
+    DCHECK(!expr.AsIdentifier().IsEnum());
+    DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
     DCHECK(is_sloppy(language_mode()) ||
            !IsFutureStrictReserved(expr.AsIdentifier()));
     Consume(Token::COLON);
@@ -606,14 +666,6 @@
     // accept "native function" in the preparser.
   }
   // Parsed expression statement.
-  // Detect attempts at 'let' declarations in sloppy mode.
-  if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
-      is_sloppy(language_mode()) && expr.IsIdentifier() &&
-      expr.AsIdentifier().IsLet()) {
-    ReportMessage(MessageTemplate::kSloppyLexical, NULL);
-    *ok = false;
-    return Statement::Default();
-  }
   ExpectSemicolon(CHECK_OK);
   return Statement::ExpressionStatement(expr);
 }
@@ -696,6 +748,16 @@
       tok != Token::SEMICOLON &&
       tok != Token::RBRACE &&
       tok != Token::EOS) {
+    // Because of the return code rewriting that happens in case of a
+    // subclass constructor, we don't want to accept tail calls; therefore
+    // we don't set ReturnExprScope to kInsideValidReturnStatement here.
+    ReturnExprContext return_expr_context =
+        IsSubclassConstructor(function_state_->kind())
+            ? function_state_->return_expr_context()
+            : ReturnExprContext::kInsideValidReturnStatement;
+
+    ReturnExprScope maybe_allow_tail_calls(function_state_,
+                                           return_expr_context);
     ParseExpression(true, CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
@@ -732,23 +794,27 @@
   ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
 
-  Expect(Token::LBRACE, CHECK_OK);
-  Token::Value token = peek();
-  while (token != Token::RBRACE) {
-    if (token == Token::CASE) {
-      Expect(Token::CASE, CHECK_OK);
-      ParseExpression(true, CHECK_OK);
-    } else {
-      Expect(Token::DEFAULT, CHECK_OK);
-    }
-    Expect(Token::COLON, CHECK_OK);
-    token = peek();
-    Statement statement = Statement::Jump();
-    while (token != Token::CASE &&
-           token != Token::DEFAULT &&
-           token != Token::RBRACE) {
-      statement = ParseStatementListItem(CHECK_OK);
+  Scope* cases_scope = NewScope(scope_, BLOCK_SCOPE);
+  {
+    BlockState cases_block_state(&scope_, cases_scope);
+    Expect(Token::LBRACE, CHECK_OK);
+    Token::Value token = peek();
+    while (token != Token::RBRACE) {
+      if (token == Token::CASE) {
+        Expect(Token::CASE, CHECK_OK);
+        ParseExpression(true, CHECK_OK);
+      } else {
+        Expect(Token::DEFAULT, CHECK_OK);
+      }
+      Expect(Token::COLON, CHECK_OK);
       token = peek();
+      Statement statement = Statement::Jump();
+      while (token != Token::CASE &&
+             token != Token::DEFAULT &&
+             token != Token::RBRACE) {
+        statement = ParseStatementListItem(CHECK_OK);
+        token = peek();
+      }
     }
   }
   Expect(Token::RBRACE, ok);
@@ -788,12 +854,16 @@
   // ForStatement ::
   //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
 
+  // Create an in-between scope for let-bound iteration variables.
+  Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
+  bool has_lexical = false;
+
+  BlockState block_state(&scope_, for_scope);
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
-  bool is_let_identifier_expression = false;
   if (peek() != Token::SEMICOLON) {
     ForEachStatement::VisitMode mode;
-    if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
+    if (peek() == Token::VAR || peek() == Token::CONST ||
         (peek() == Token::LET && IsNextLetKeyword())) {
       int decl_count;
       bool is_lexical;
@@ -803,6 +873,7 @@
       ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
                                 &is_binding_pattern, &first_initializer_loc,
                                 &bindings_loc, CHECK_OK);
+      if (is_lexical) has_lexical = true;
       if (CheckInOrOf(&mode, ok)) {
         if (!*ok) return Statement::Default();
         if (decl_count != 1) {
@@ -814,7 +885,12 @@
         }
         if (first_initializer_loc.IsValid() &&
             (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
-             is_lexical || is_binding_pattern)) {
+             is_lexical || is_binding_pattern || allow_harmony_for_in())) {
+          // Only increment the use count if we would have let this through
+          // without the flag.
+          if (use_counts_ != nullptr && allow_harmony_for_in()) {
+            ++use_counts_[v8::Isolate::kForInInitializer];
+          }
           PreParserTraits::ReportMessageAt(
               first_initializer_loc, MessageTemplate::kForInOfLoopInitializer,
               ForEachStatement::VisitModeString(mode));
@@ -831,7 +907,11 @@
         }
 
         Expect(Token::RPAREN, CHECK_OK);
-        ParseScopedStatement(true, CHECK_OK);
+        {
+          ReturnExprScope no_tail_calls(function_state_,
+                                        ReturnExprContext::kInsideForInOfBody);
+          ParseScopedStatement(true, CHECK_OK);
+        }
         return Statement::Default();
       }
     } else {
@@ -839,8 +919,6 @@
       ExpressionClassifier classifier(this);
       Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
       int lhs_end_pos = scanner()->location().end_pos;
-      is_let_identifier_expression =
-          lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
       bool is_for_each = CheckInOrOf(&mode, ok);
       if (!*ok) return Statement::Default();
       bool is_destructuring = is_for_each &&
@@ -868,33 +946,39 @@
         }
 
         Expect(Token::RPAREN, CHECK_OK);
-        ParseScopedStatement(true, CHECK_OK);
+        Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
+        {
+          BlockState block_state(&scope_, body_scope);
+          ParseScopedStatement(true, CHECK_OK);
+        }
         return Statement::Default();
       }
     }
   }
 
   // Parsed initializer at this point.
-  // Detect attempts at 'let' declarations in sloppy mode.
-  if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
-      is_sloppy(language_mode()) && is_let_identifier_expression) {
-    ReportMessage(MessageTemplate::kSloppyLexical, NULL);
-    *ok = false;
-    return Statement::Default();
-  }
   Expect(Token::SEMICOLON, CHECK_OK);
 
-  if (peek() != Token::SEMICOLON) {
-    ParseExpression(true, CHECK_OK);
-  }
-  Expect(Token::SEMICOLON, CHECK_OK);
+  // If there are let bindings, then condition and the next statement of the
+  // for loop must be parsed in a new scope.
+  Scope* inner_scope = scope_;
+  if (has_lexical) inner_scope = NewScope(for_scope, BLOCK_SCOPE);
 
-  if (peek() != Token::RPAREN) {
-    ParseExpression(true, CHECK_OK);
-  }
-  Expect(Token::RPAREN, CHECK_OK);
+  {
+    BlockState block_state(&scope_, inner_scope);
 
-  ParseScopedStatement(true, ok);
+    if (peek() != Token::SEMICOLON) {
+      ParseExpression(true, CHECK_OK);
+    }
+    Expect(Token::SEMICOLON, CHECK_OK);
+
+    if (peek() != Token::RPAREN) {
+      ParseExpression(true, CHECK_OK);
+    }
+    Expect(Token::RPAREN, CHECK_OK);
+
+    ParseScopedStatement(true, ok);
+  }
   return Statement::Default();
 }
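+
+// For illustration, the extra scopes mirror what the full parser builds so
+// that each iteration of
+//
+//   for (let i = 0; i < 3; i++) fns.push(() => i);
+//
+// can capture its own `i` (the closures see 0, 1 and 2, not 3).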
 
@@ -929,7 +1013,11 @@
 
   Expect(Token::TRY, CHECK_OK);
 
-  ParseBlock(CHECK_OK);
+  {
+    ReturnExprScope no_tail_calls(function_state_,
+                                  ReturnExprContext::kInsideTryBlock);
+    ParseBlock(CHECK_OK);
+  }
 
   Token::Value tok = peek();
   if (tok != Token::CATCH && tok != Token::FINALLY) {
@@ -937,24 +1025,42 @@
     *ok = false;
     return Statement::Default();
   }
+  TailCallExpressionList tail_call_expressions_in_catch_block(zone());
+  bool catch_block_exists = false;
   if (tok == Token::CATCH) {
     Consume(Token::CATCH);
     Expect(Token::LPAREN, CHECK_OK);
+    Scope* catch_scope = NewScope(scope_, CATCH_SCOPE);
     ExpressionClassifier pattern_classifier(this);
     ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
     ValidateBindingPattern(&pattern_classifier, CHECK_OK);
     Expect(Token::RPAREN, CHECK_OK);
     {
-      // TODO(adamk): Make this CATCH_SCOPE
-      Scope* with_scope = NewScope(scope_, WITH_SCOPE);
-      BlockState block_state(&scope_, with_scope);
-      ParseBlock(CHECK_OK);
+      CollectExpressionsInTailPositionToListScope
+          collect_tail_call_expressions_scope(
+              function_state_, &tail_call_expressions_in_catch_block);
+      BlockState block_state(&scope_, catch_scope);
+      Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+      {
+        BlockState block_state(&scope_, block_scope);
+        ParseBlock(CHECK_OK);
+      }
     }
+    catch_block_exists = true;
     tok = peek();
   }
   if (tok == Token::FINALLY) {
     Consume(Token::FINALLY);
     ParseBlock(CHECK_OK);
+    if (FLAG_harmony_explicit_tailcalls && catch_block_exists &&
+        tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
+      // TODO(ishell): update chapter number.
+      // ES8 XX.YY.ZZ
+      ReportMessageAt(tail_call_expressions_in_catch_block.location(),
+                      MessageTemplate::kUnexpectedTailCallInCatchBlock);
+      *ok = false;
+      return Statement::Default();
+    }
   }
   return Statement::Default();
 }
@@ -1012,9 +1118,8 @@
 
   // See Parser::ParseFunctionLiteral for more information about lazy parsing
   // and lazy compilation.
-  bool is_lazily_parsed =
-      (outer_is_script_scope && allow_lazy() && !parenthesized_function_);
-  parenthesized_function_ = false;
+  bool is_lazily_parsed = (outer_is_script_scope && allow_lazy() &&
+                           !function_state_->this_function_is_parenthesized());
 
   Expect(Token::LBRACE, CHECK_OK);
   if (is_lazily_parsed) {
@@ -1039,11 +1144,44 @@
   if (is_strict(language_mode)) {
     int end_position = scanner()->location().end_pos;
     CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
+    CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
+                                       end_position);
   }
 
   return Expression::Default();
 }
 
+PreParser::Expression PreParser::ParseAsyncFunctionExpression(bool* ok) {
+  // AsyncFunctionDeclaration ::
+  //   async [no LineTerminator here] function ( FormalParameters[Await] )
+  //       { AsyncFunctionBody }
+  //
+  //   async [no LineTerminator here] function BindingIdentifier[Await]
+  //       ( FormalParameters[Await] ) { AsyncFunctionBody }
+  int pos = position();
+  Expect(Token::FUNCTION, CHECK_OK);
+  bool is_strict_reserved = false;
+  Identifier name;
+  FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
+
+  if (peek_any_identifier()) {
+    type = FunctionLiteral::kNamedExpression;
+    name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+    if (this->IsAwait(name)) {
+      ReportMessageAt(scanner()->location(),
+                      MessageTemplate::kAwaitBindingIdentifier);
+      *ok = false;
+      return Expression::Default();
+    }
+  }
+
+  ParseFunctionLiteral(name, scanner()->location(),
+                       is_strict_reserved ? kFunctionNameIsStrictReserved
+                                          : kFunctionNameValidityUnknown,
+                       FunctionKind::kAsyncFunction, pos, type, language_mode(),
+                       CHECK_OK);
+  return Expression::Default();
+}
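
As the hunk above shows, a named async function expression must not use `await` as its BindingIdentifier, while anonymous ones are always fine. A tiny sketch of that check, using a plain string in place of PreParserIdentifier:

#include <string>

// nullptr models the anonymous (kAnonymousExpression) case.
enum class NameError { kNone, kAwaitBindingIdentifier };

NameError CheckAsyncFunctionName(const std::string* name) {
  if (name == nullptr) return NameError::kNone;
  if (*name == "await") return NameError::kAwaitBindingIdentifier;
  return NameError::kNone;
}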
 
 void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
                                              Scanner::BookmarkScope* bookmark) {
@@ -1090,6 +1228,7 @@
   if (has_extends) {
     ExpressionClassifier extends_classifier(this);
     ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+    CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
     ValidateExpression(&extends_classifier, CHECK_OK);
     if (classifier != nullptr) {
       classifier->Accumulate(&extends_classifier,
@@ -1104,12 +1243,11 @@
   while (peek() != Token::RBRACE) {
     if (Check(Token::SEMICOLON)) continue;
     const bool in_class = true;
-    const bool is_static = false;
     bool is_computed_name = false;  // Classes do not care about computed
                                     // property names here.
     Identifier name;
     ExpressionClassifier property_classifier(this);
-    ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
+    ParsePropertyDefinition(&checker, in_class, has_extends, MethodKind::Normal,
                             &is_computed_name, &has_seen_constructor,
                             &property_classifier, &name, CHECK_OK);
     ValidateExpression(&property_classifier, CHECK_OK);
@@ -1151,15 +1289,24 @@
   //     do '{' StatementList '}'
   Expect(Token::DO, CHECK_OK);
   Expect(Token::LBRACE, CHECK_OK);
-  Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
-  {
-    BlockState block_state(&scope_, block_scope);
-    while (peek() != Token::RBRACE) {
-      ParseStatementListItem(CHECK_OK);
-    }
-    Expect(Token::RBRACE, CHECK_OK);
-    return PreParserExpression::Default();
+  while (peek() != Token::RBRACE) {
+    ParseStatementListItem(CHECK_OK);
   }
+  Expect(Token::RBRACE, CHECK_OK);
+  return PreParserExpression::Default();
+}
+
+void PreParserTraits::ParseAsyncArrowSingleExpressionBody(
+    PreParserStatementList body, bool accept_IN,
+    Type::ExpressionClassifier* classifier, int pos, bool* ok) {
+  Scope* scope = pre_parser_->scope_;
+  scope->ForceContextAllocation();
+
+  PreParserExpression return_value =
+      pre_parser_->ParseAssignmentExpression(accept_IN, classifier, ok);
+  if (!*ok) return;
+
+  body->Add(PreParserStatement::ExpressionStatement(return_value), zone());
 }
 
 #undef CHECK_OK
diff --git a/src/parsing/preparser.h b/src/parsing/preparser.h
index f2f6951..16eeab4 100644
--- a/src/parsing/preparser.h
+++ b/src/parsing/preparser.h
@@ -55,6 +55,15 @@
   static PreParserIdentifier Constructor() {
     return PreParserIdentifier(kConstructorIdentifier);
   }
+  static PreParserIdentifier Enum() {
+    return PreParserIdentifier(kEnumIdentifier);
+  }
+  static PreParserIdentifier Await() {
+    return PreParserIdentifier(kAwaitIdentifier);
+  }
+  static PreParserIdentifier Async() {
+    return PreParserIdentifier(kAsyncIdentifier);
+  }
   bool IsEval() const { return type_ == kEvalIdentifier; }
   bool IsArguments() const { return type_ == kArgumentsIdentifier; }
   bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
@@ -64,7 +73,9 @@
   bool IsYield() const { return type_ == kYieldIdentifier; }
   bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
   bool IsConstructor() const { return type_ == kConstructorIdentifier; }
-  bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
+  bool IsEnum() const { return type_ == kEnumIdentifier; }
+  bool IsAwait() const { return type_ == kAwaitIdentifier; }
+  bool IsAsync() const { return type_ == kAsyncIdentifier; }
   bool IsFutureStrictReserved() const {
     return type_ == kFutureStrictReservedIdentifier ||
            type_ == kLetIdentifier || type_ == kStaticIdentifier ||
@@ -91,7 +102,10 @@
     kArgumentsIdentifier,
     kUndefinedIdentifier,
     kPrototypeIdentifier,
-    kConstructorIdentifier
+    kConstructorIdentifier,
+    kEnumIdentifier,
+    kAwaitIdentifier,
+    kAsyncIdentifier
   };
 
   explicit PreParserIdentifier(Type type) : type_(type) {}
@@ -166,6 +180,12 @@
                                ExpressionTypeField::encode(kCallExpression));
   }
 
+  static PreParserExpression CallEval() {
+    return PreParserExpression(
+        TypeField::encode(kExpression) |
+        ExpressionTypeField::encode(kCallEvalExpression));
+  }
+
   static PreParserExpression SuperCallReference() {
     return PreParserExpression(
         TypeField::encode(kExpression) |
@@ -227,7 +247,13 @@
 
   bool IsCall() const {
     return TypeField::decode(code_) == kExpression &&
-           ExpressionTypeField::decode(code_) == kCallExpression;
+           (ExpressionTypeField::decode(code_) == kCallExpression ||
+            ExpressionTypeField::decode(code_) == kCallEvalExpression);
+  }
+
+  bool IsDirectEvalCall() const {
+    return TypeField::decode(code_) == kExpression &&
+           ExpressionTypeField::decode(code_) == kCallEvalExpression;
   }
 
   bool IsSuperCallReference() const {
@@ -285,6 +311,7 @@
     kThisPropertyExpression,
     kPropertyExpression,
     kCallExpression,
+    kCallEvalExpression,
     kSuperCallReference,
     kNoTemplateTagExpression,
     kAssignment
@@ -494,6 +521,9 @@
   PreParserExpression NewCall(PreParserExpression expression,
                               PreParserExpressionList arguments,
                               int pos) {
+    if (expression.IsIdentifier() && expression.AsIdentifier().IsEval()) {
+      return PreParserExpression::CallEval();
+    }
     return PreParserExpression::Call();
   }
   PreParserExpression NewCallNew(PreParserExpression expression,
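
The NewCall change above tags calls whose callee is the identifier `eval` with their own expression kind, so IsDirectEvalCall() can be answered later without building an AST. A sketch of the classification, with hypothetical names:

#include <string>

enum class ExprKind { kCall, kCallEval };

// Mirrors NewCall(): a direct eval is a call whose callee is the plain
// identifier `eval`.
ExprKind ClassifyCall(bool callee_is_identifier, const std::string& name) {
  if (callee_is_identifier && name == "eval") return ExprKind::kCallEval;
  return ExprKind::kCall;
}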
@@ -597,6 +627,14 @@
     return identifier.IsArguments();
   }
 
+  static bool IsAwait(PreParserIdentifier identifier) {
+    return identifier.IsAwait();
+  }
+
+  static bool IsAsync(PreParserIdentifier identifier) {
+    return identifier.IsAsync();
+  }
+
   static bool IsEvalOrArguments(PreParserIdentifier identifier) {
     return identifier.IsEvalOrArguments();
   }
@@ -626,6 +664,14 @@
     return expression.AsIdentifier();
   }
 
+  static bool IsEvalIdentifier(PreParserExpression expression) {
+    return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
+  }
+
+  static bool IsDirectEvalCall(PreParserExpression expression) {
+    return expression.IsDirectEvalCall();
+  }
+
   static bool IsFutureStrictReserved(PreParserIdentifier identifier) {
     return identifier.IsFutureStrictReserved();
   }
@@ -814,8 +860,8 @@
   }
 
   static void AddParameterInitializationBlock(
-      const PreParserFormalParameters& parameters,
-      PreParserStatementList list, bool* ok) {}
+      const PreParserFormalParameters& parameters, PreParserStatementList list,
+      bool is_async, bool* ok) {}
 
   V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
                                       int* expected_property_count, bool* ok) {
@@ -832,6 +878,12 @@
       PreParserExpression expression, const Scanner::Location& params_loc,
       Scanner::Location* duplicate_loc, bool* ok);
 
+  void ParseAsyncArrowSingleExpressionBody(
+      PreParserStatementList body, bool accept_IN,
+      Type::ExpressionClassifier* classifier, int pos, bool* ok);
+
+  V8_INLINE PreParserExpression ParseAsyncFunctionExpression(bool* ok);
+
   void ReindexLiterals(const PreParserFormalParameters& parameters) {}
 
   struct TemplateLiteralState {};
@@ -888,6 +940,7 @@
                                         bool name_is_strict_reserved, int pos,
                                         bool* ok);
 
+  V8_INLINE void MarkCollectedTailCallExpressions() {}
   V8_INLINE void MarkTailPosition(PreParserExpression) {}
 
   PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
@@ -903,6 +956,11 @@
                                            PreParserExpressionList args,
                                            int pos);
 
+  inline PreParserExpression ExpressionListToExpression(
+      PreParserExpressionList args) {
+    return PreParserExpression::Default();
+  }
+
   inline void RewriteDestructuringAssignments() {}
 
   inline PreParserExpression RewriteExponentiation(PreParserExpression left,
@@ -926,14 +984,14 @@
   inline void RewriteNonPattern(Type::ExpressionClassifier* classifier,
                                 bool* ok);
 
+  inline PreParserExpression RewriteAwaitExpression(PreParserExpression value,
+                                                    int pos);
+
   V8_INLINE Zone* zone() const;
   V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const;
 
   inline PreParserExpression RewriteYieldStar(
       PreParserExpression generator, PreParserExpression expr, int pos);
-  inline PreParserExpression RewriteInstanceof(PreParserExpression lhs,
-                                               PreParserExpression rhs,
-                                               int pos);
 
  private:
   PreParser* pre_parser_;
@@ -966,19 +1024,30 @@
   PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
             ParserRecorder* log, uintptr_t stack_limit)
       : ParserBase<PreParserTraits>(zone, scanner, stack_limit, NULL,
-                                    ast_value_factory, log, this) {}
+                                    ast_value_factory, log, this),
+        use_counts_(nullptr) {}
 
   // Pre-parse the program from the character stream; returns true on
   // success (even if parsing failed, the pre-parse data successfully
   // captured the syntax error), and false if a stack-overflow happened
   // during parsing.
-  PreParseResult PreParseProgram(int* materialized_literals = 0) {
+  PreParseResult PreParseProgram(int* materialized_literals = 0,
+                                 bool is_module = false) {
     Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
+
+    // ModuleDeclarationInstantiation for Source Text Module Records creates a
+    // new Module Environment Record whose outer lexical environment record is
+    // the global scope.
+    if (is_module) {
+      scope = NewScope(scope, MODULE_SCOPE);
+    }
+
     PreParserFactory factory(NULL);
     FunctionState top_scope(&function_state_, &scope_, scope, kNormalFunction,
                             &factory);
     bool ok = true;
     int start_position = scanner()->peek_location().beg_pos;
+    parsing_module_ = is_module;
     ParseStatementList(Token::EOS, &ok);
     if (stack_overflow()) return kPreParseStackOverflow;
     if (!ok) {
@@ -986,6 +1055,8 @@
     } else if (is_strict(scope_->language_mode())) {
       CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
                               &ok);
+      CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
+                                         scanner()->location().end_pos);
     }
     if (materialized_literals) {
       *materialized_literals = function_state_->materialized_literal_count();
@@ -1001,9 +1072,12 @@
   // keyword and parameters, and have consumed the initial '{'.
   // At return, unless an error occurred, the scanner is positioned before
   // the final '}'.
-  PreParseResult PreParseLazyFunction(
-      LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
-      ParserRecorder* log, Scanner::BookmarkScope* bookmark = nullptr);
+  PreParseResult PreParseLazyFunction(LanguageMode language_mode,
+                                      FunctionKind kind,
+                                      bool has_simple_parameters,
+                                      bool parsing_module, ParserRecorder* log,
+                                      Scanner::BookmarkScope* bookmark,
+                                      int* use_counts);
 
  private:
   friend class PreParserTraits;
@@ -1027,7 +1101,12 @@
   Statement ParseSubStatement(AllowLabelledFunctionStatement allow_function,
                               bool* ok);
   Statement ParseScopedStatement(bool legacy, bool* ok);
+  Statement ParseHoistableDeclaration(bool* ok);
+  Statement ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
+                                      bool* ok);
   Statement ParseFunctionDeclaration(bool* ok);
+  Statement ParseAsyncFunctionDeclaration(bool* ok);
+  Expression ParseAsyncFunctionExpression(bool* ok);
   Statement ParseClassDeclaration(bool* ok);
   Statement ParseBlock(bool* ok);
   Statement ParseVariableStatement(VariableDeclarationContext var_context,
@@ -1077,6 +1156,8 @@
                                         Scanner::Location class_name_location,
                                         bool name_is_strict_reserved, int pos,
                                         bool* ok);
+
+  int* use_counts_;
 };
 
 
@@ -1114,6 +1195,9 @@
   // lists that are too long.
 }
 
+PreParserExpression PreParserTraits::ParseAsyncFunctionExpression(bool* ok) {
+  return pre_parser_->ParseAsyncFunctionExpression(ok);
+}
 
 PreParserExpression PreParserTraits::ParseDoExpression(bool* ok) {
   return pre_parser_->ParseDoExpression(ok);
@@ -1125,6 +1209,10 @@
   pre_parser_->ValidateExpression(classifier, ok);
 }
 
+PreParserExpression PreParserTraits::RewriteAwaitExpression(
+    PreParserExpression value, int pos) {
+  return value;
+}
 
 Zone* PreParserTraits::zone() const {
   return pre_parser_->function_state_->scope()->zone();
@@ -1141,20 +1229,20 @@
   return PreParserExpression::Default();
 }
 
-PreParserExpression PreParserTraits::RewriteInstanceof(PreParserExpression lhs,
-                                                       PreParserExpression rhs,
-                                                       int pos) {
-  return PreParserExpression::Default();
-}
-
 PreParserStatementList PreParser::ParseEagerFunctionBody(
     PreParserIdentifier function_name, int pos,
     const PreParserFormalParameters& parameters, FunctionKind kind,
     FunctionLiteral::FunctionType function_type, bool* ok) {
   ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
 
-  ParseStatementList(Token::RBRACE, ok);
-  if (!*ok) return PreParserStatementList();
+  Scope* inner_scope = scope_;
+  if (!parameters.is_simple) inner_scope = NewScope(scope_, BLOCK_SCOPE);
+
+  {
+    BlockState block_state(&scope_, inner_scope);
+    ParseStatementList(Token::RBRACE, ok);
+    if (!*ok) return PreParserStatementList();
+  }
 
   Expect(Token::RBRACE, ok);
   return PreParserStatementList();
diff --git a/src/parsing/scanner-character-streams.h b/src/parsing/scanner-character-streams.h
index 603db93..7e065cf 100644
--- a/src/parsing/scanner-character-streams.h
+++ b/src/parsing/scanner-character-streams.h
@@ -158,8 +158,10 @@
 
   void PushBack(uc32 character) override {
     DCHECK(buffer_cursor_ > raw_data_);
-    buffer_cursor_--;
     pos_--;
+    if (character != kEndOfInput) {
+      buffer_cursor_--;
+    }
   }
 
   bool SetBookmark() override;
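
The PushBack fix above reflects that the end-of-input sentinel is synthesized by Advance() rather than stored in raw_data_, so pushing it back must rewind the logical position but not the buffer cursor. A standalone sketch of the same invariant:

#include <cassert>
#include <cstddef>
#include <cstdint>

class OneByteStream {
 public:
  static const int32_t kEndOfInput = -1;
  OneByteStream(const uint8_t* data, size_t length)
      : raw_data_(data), end_(data + length), cursor_(data), pos_(0) {}

  int32_t Advance() {
    ++pos_;
    if (cursor_ == end_) return kEndOfInput;  // sentinel, not in the buffer
    return *cursor_++;
  }

  void PushBack(int32_t character) {
    assert(cursor_ > raw_data_ || character == kEndOfInput);
    --pos_;
    if (character != kEndOfInput) --cursor_;  // only real characters rewind
  }

 private:
  const uint8_t* raw_data_;
  const uint8_t* end_;
  const uint8_t* cursor_;
  size_t pos_;
};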
diff --git a/src/parsing/scanner.cc b/src/parsing/scanner.cc
index 698cb5e..6a9b32e 100644
--- a/src/parsing/scanner.cc
+++ b/src/parsing/scanner.cc
@@ -40,6 +40,7 @@
     : unicode_cache_(unicode_cache),
       bookmark_c0_(kNoBookmark),
       octal_pos_(Location::invalid()),
+      decimal_with_leading_zero_pos_(Location::invalid()),
       found_html_comment_(false),
       allow_harmony_exponentiation_operator_(false) {
   bookmark_current_.literal_chars = &bookmark_current_literal_;
@@ -249,6 +250,7 @@
   if (V8_UNLIKELY(next_next_.token != Token::UNINITIALIZED)) {
     next_ = next_next_;
     next_next_.token = Token::UNINITIALIZED;
+    has_line_terminator_before_next_ = has_line_terminator_after_next_;
     return current_.token;
   }
   has_line_terminator_before_next_ = false;
@@ -274,7 +276,12 @@
     return next_next_.token;
   }
   TokenDesc prev = current_;
+  bool has_line_terminator_before_next =
+      has_line_terminator_before_next_ || has_multiline_comment_before_next_;
   Next();
+  has_line_terminator_after_next_ =
+      has_line_terminator_before_next_ || has_multiline_comment_before_next_;
+  has_line_terminator_before_next_ = has_line_terminator_before_next;
   Token::Value ret = next_.token;
   next_next_ = next_;
   next_ = current_;
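
PeekAhead() now captures whether a line terminator (or a multi-line comment containing one) precedes the token after next, while restoring the flag for the current gap; Next()'s buffered fast path then shifts the saved flag back. A compact model of that bookkeeping (flag handling simplified, names hypothetical):

struct LookaheadScanner {
  bool has_line_terminator_before_next_ = false;
  bool has_line_terminator_after_next_ = false;
  bool buffered_ = false;  // stands in for next_next_.token != UNINITIALIZED

  // Scanning one token overwrites the flag for the newest gap.
  void ScanOne(bool newline_before) {
    has_line_terminator_before_next_ = newline_before;
  }

  void PeekAhead(bool newline_before_token_after_next) {
    if (buffered_) return;
    bool before_next = has_line_terminator_before_next_;
    ScanOne(newline_before_token_after_next);  // like calling Next() above
    has_line_terminator_after_next_ = has_line_terminator_before_next_;
    has_line_terminator_before_next_ = before_next;
    buffered_ = true;
  }

  void ConsumeBuffered() {  // like Next() taking the next_next_ fast path
    buffered_ = false;
    has_line_terminator_before_next_ = has_line_terminator_after_next_;
  }
};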
@@ -975,10 +982,18 @@
 Token::Value Scanner::ScanNumber(bool seen_period) {
   DCHECK(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
 
-  enum { DECIMAL, HEX, OCTAL, IMPLICIT_OCTAL, BINARY } kind = DECIMAL;
+  enum {
+    DECIMAL,
+    DECIMAL_WITH_LEADING_ZERO,
+    HEX,
+    OCTAL,
+    IMPLICIT_OCTAL,
+    BINARY
+  } kind = DECIMAL;
 
   LiteralScope literal(this);
   bool at_start = !seen_period;
+  int start_pos = source_pos();  // For reporting octal and leading-zero
+                                 // decimal positions.
   if (seen_period) {
     // we have already seen a decimal point of the float
     AddLiteralChar('.');
@@ -987,7 +1002,6 @@
   } else {
     // if the first character is '0' we must check for octals and hex
     if (c0_ == '0') {
-      int start_pos = source_pos();  // For reporting octal positions.
       AddLiteralCharAdvance();
 
       // either 0, 0exxx, 0Exxx, 0.xxx, a hex number, a binary number or
@@ -1029,7 +1043,7 @@
         while (true) {
           if (c0_ == '8' || c0_ == '9') {
             at_start = false;
-            kind = DECIMAL;
+            kind = DECIMAL_WITH_LEADING_ZERO;
             break;
           }
           if (c0_  < '0' || '7'  < c0_) {
@@ -1039,11 +1053,13 @@
           }
           AddLiteralCharAdvance();
         }
+      } else if (c0_ == '8' || c0_ == '9') {
+        kind = DECIMAL_WITH_LEADING_ZERO;
       }
     }
 
     // Parse decimal digits and allow trailing fractional part.
-    if (kind == DECIMAL) {
+    if (kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO) {
       if (at_start) {
         uint64_t value = 0;
         while (IsDecimalDigit(c0_)) {
@@ -1060,6 +1076,8 @@
           literal.Complete();
           HandleLeadSurrogate();
 
+          if (kind == DECIMAL_WITH_LEADING_ZERO)
+            decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
           return Token::SMI;
         }
         HandleLeadSurrogate();
@@ -1076,7 +1094,8 @@
   // scan exponent, if any
   if (c0_ == 'e' || c0_ == 'E') {
     DCHECK(kind != HEX);  // 'e'/'E' must be scanned as part of the hex number
-    if (kind != DECIMAL) return Token::ILLEGAL;
+    if (!(kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO))
+      return Token::ILLEGAL;
     // scan exponent
     AddLiteralCharAdvance();
     if (c0_ == '+' || c0_ == '-')
@@ -1098,6 +1117,8 @@
 
   literal.Complete();
 
+  if (kind == DECIMAL_WITH_LEADING_ZERO)
+    decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
   return Token::NUMBER;
 }
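
ScanNumber() now tracks decimal literals that merely start with a useless leading zero (e.g. 08, 09, 0123.8) separately from implicit octals, so strict-mode code can record a use counter for them. A simplified classifier over the digit string (hex, binary, and explicit octal prefixes omitted):

#include <cstddef>
#include <string>

enum class NumberKind { kDecimal, kDecimalWithLeadingZero, kImplicitOctal };

// A literal starting with '0' is implicit octal while its digits stay in
// 0..7, and becomes DECIMAL_WITH_LEADING_ZERO the moment an 8 or 9 appears.
NumberKind ClassifyLiteral(const std::string& digits) {
  if (digits.size() < 2 || digits[0] != '0') return NumberKind::kDecimal;
  for (size_t i = 1; i < digits.size(); ++i) {
    if (digits[i] == '8' || digits[i] == '9')
      return NumberKind::kDecimalWithLeadingZero;
  }
  return NumberKind::kImplicitOctal;
}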
 
@@ -1135,6 +1156,9 @@
 // Keyword Matcher
 
 #define KEYWORDS(KEYWORD_GROUP, KEYWORD)                    \
+  KEYWORD_GROUP('a')                                        \
+  KEYWORD("async", Token::ASYNC)                            \
+  KEYWORD("await", Token::AWAIT)                            \
   KEYWORD_GROUP('b')                                        \
   KEYWORD("break", Token::BREAK)                            \
   KEYWORD_GROUP('c')                                        \
@@ -1150,7 +1174,7 @@
   KEYWORD("do", Token::DO)                                  \
   KEYWORD_GROUP('e')                                        \
   KEYWORD("else", Token::ELSE)                              \
-  KEYWORD("enum", Token::FUTURE_RESERVED_WORD)              \
+  KEYWORD("enum", Token::ENUM)                              \
   KEYWORD("export", Token::EXPORT)                          \
   KEYWORD("extends", Token::EXTENDS)                        \
   KEYWORD_GROUP('f')                                        \
@@ -1196,7 +1220,6 @@
   KEYWORD_GROUP('y')                                        \
   KEYWORD("yield", Token::YIELD)
 
-
 static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
                                              int input_length, bool escaped) {
   DCHECK(input_length >= 1);
diff --git a/src/parsing/scanner.h b/src/parsing/scanner.h
index 22c504c..0acc7ab 100644
--- a/src/parsing/scanner.h
+++ b/src/parsing/scanner.h
@@ -225,8 +225,14 @@
     } else {
       is_one_byte_ = other->is_one_byte_;
       position_ = other->position_;
-      backing_store_.Dispose();
-      backing_store_ = other->backing_store_.Clone();
+      if (position_ < backing_store_.length()) {
+        std::copy(other->backing_store_.begin(),
+                  other->backing_store_.begin() + position_,
+                  backing_store_.begin());
+      } else {
+        backing_store_.Dispose();
+        backing_store_ = other->backing_store_.Clone();
+      }
     }
   }
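
The bookmark CopyFrom change above avoids a Dispose()/Clone() round trip when the needed literal prefix already fits in the destination buffer. The same idea with std::vector standing in for the backing store:

#include <algorithm>
#include <cstddef>
#include <vector>

// Reuse the destination buffer when the prefix [0, position) fits,
// otherwise fall back to a full clone.
void CopyLiteralPrefix(std::vector<char>* dst, const std::vector<char>& src,
                       size_t position) {
  if (position < dst->size()) {
    std::copy(src.begin(), src.begin() + position, dst->begin());
  } else {
    *dst = src;  // stands in for Dispose() + Clone()
  }
}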
 
@@ -419,6 +425,13 @@
   // Returns the location of the last seen octal literal.
   Location octal_position() const { return octal_pos_; }
   void clear_octal_position() { octal_pos_ = Location::invalid(); }
+  // Returns the location of the last seen decimal literal with a leading zero.
+  Location decimal_with_leading_zero_position() const {
+    return decimal_with_leading_zero_pos_;
+  }
+  void clear_decimal_with_leading_zero_position() {
+    decimal_with_leading_zero_pos_ = Location::invalid();
+  }
 
   // Returns the value of the last smi that was scanned.
   int smi_value() const { return current_.smi_value_; }
@@ -436,6 +449,12 @@
            has_multiline_comment_before_next_;
   }
 
+  bool HasAnyLineTerminatorAfterNext() {
+    Token::Value ensure_next_next = PeekAhead();
+    USE(ensure_next_next);
+    return has_line_terminator_after_next_;
+  }
+
   // Scans the input as a regular expression pattern, previous
   // character(s) must be /(=). Returns true if a pattern is scanned.
   bool ScanRegExpPattern(bool seen_equal);
@@ -582,7 +601,7 @@
   }
 
   void PushBack(uc32 ch) {
-    if (ch > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+    if (c0_ > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
       source_->PushBack(unibrow::Utf16::TrailSurrogate(c0_));
       source_->PushBack(unibrow::Utf16::LeadSurrogate(c0_));
     } else {
@@ -766,9 +785,9 @@
   // Input stream. Must be initialized to an Utf16CharacterStream.
   Utf16CharacterStream* source_;
 
-
-  // Start position of the octal literal last scanned.
+  // Last-seen positions of potentially problematic tokens.
   Location octal_pos_;
+  Location decimal_with_leading_zero_pos_;
 
   // One Unicode character look-ahead; c0_ < 0 at the end of the input.
   uc32 c0_;
@@ -780,6 +799,7 @@
   // Whether there is a multi-line comment that contains a
   // line-terminator after the current token, and before the next.
   bool has_multiline_comment_before_next_;
+  bool has_line_terminator_after_next_;
 
   // Whether this scanner encountered an HTML comment.
   bool found_html_comment_;
diff --git a/src/parsing/token.h b/src/parsing/token.h
index fae9ea8..8b44cda 100644
--- a/src/parsing/token.h
+++ b/src/parsing/token.h
@@ -148,10 +148,13 @@
   T(IDENTIFIER, NULL, 0)                                             \
                                                                      \
   /* Future reserved words (ECMA-262, section 7.6.1.2). */           \
-  T(FUTURE_RESERVED_WORD, NULL, 0)                                   \
   T(FUTURE_STRICT_RESERVED_WORD, NULL, 0)                            \
+  K(ASYNC, "async", 0)                                               \
+  /* `await` is a reserved word in module code only */               \
+  K(AWAIT, "await", 0)                                               \
   K(CLASS, "class", 0)                                               \
   K(CONST, "const", 0)                                               \
+  K(ENUM, "enum", 0)                                                 \
   K(EXPORT, "export", 0)                                             \
   K(EXTENDS, "extends", 0)                                           \
   K(IMPORT, "import", 0)                                             \
@@ -173,7 +176,6 @@
   T(TEMPLATE_SPAN, NULL, 0)                                          \
   T(TEMPLATE_TAIL, NULL, 0)
 
-
 class Token {
  public:
   // All token values.
@@ -197,9 +199,10 @@
   }
 
   static bool IsIdentifier(Value tok, LanguageMode language_mode,
-                           bool is_generator) {
+                           bool is_generator, bool is_module) {
     switch (tok) {
       case IDENTIFIER:
+      case ASYNC:
         return true;
       case ESCAPED_STRICT_RESERVED_WORD:
       case FUTURE_STRICT_RESERVED_WORD:
@@ -208,6 +211,8 @@
         return is_sloppy(language_mode);
       case YIELD:
         return !is_generator && is_sloppy(language_mode);
+      case AWAIT:
+        return !is_module;
       default:
         return false;
     }
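
With the extra is_module parameter, `await` stops being usable as an identifier in module code, while the contextual keyword `async` is always accepted. A sketch mirroring a few cases of the switch above (hypothetical enum, strictness reduced to a bool):

enum class Tok { kIdentifier, kAsync, kAwait, kYield, kFutureStrictReserved };

bool IsIdentifier(Tok tok, bool strict_mode, bool is_generator,
                  bool is_module) {
  switch (tok) {
    case Tok::kIdentifier:
    case Tok::kAsync:                 // contextual keyword, always usable
      return true;
    case Tok::kFutureStrictReserved:
      return !strict_mode;            // sloppy mode only
    case Tok::kYield:
      return !is_generator && !strict_mode;
    case Tok::kAwait:
      return !is_module;              // reserved word in module code only
  }
  return false;
}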
diff --git a/src/ppc/assembler-ppc-inl.h b/src/ppc/assembler-ppc-inl.h
index c495fee..59d3248 100644
--- a/src/ppc/assembler-ppc-inl.h
+++ b/src/ppc/assembler-ppc-inl.h
@@ -89,11 +89,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Assembler::target_address_at(pc_, host_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
          rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
@@ -183,19 +178,6 @@
   return pc + (len + 2) * kInstrSize;
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
-                                   icache_flush_mode);
-}
-
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -343,7 +325,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc
index 507eec1..bf59955 100644
--- a/src/ppc/assembler-ppc.cc
+++ b/src/ppc/assembler-ppc.cc
@@ -155,6 +155,43 @@
   return false;
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return static_cast<uint32_t>(
+     reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_memory_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_memory_reference &&
+           updated_memory_reference < new_base + new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
+        icache_flush_mode);
+  } else {
+    UNREACHABLE();
+  }
+}
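
The relocation arithmetic above is independent of the PPC encoding: memory references are rebased by their offset from the old base, and size references keep their delta from the memory size. A standalone version with Address modeled as uintptr_t:

#include <cassert>
#include <cstdint>

uintptr_t RebaseMemoryReference(uintptr_t ref, uintptr_t old_base,
                                uintptr_t new_base, uint32_t old_size,
                                uint32_t new_size) {
  assert(old_base <= ref && ref < old_base + old_size);
  uintptr_t updated = new_base + (ref - old_base);  // preserve the offset
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}

uint32_t RebaseSizeReference(uint32_t ref, uint32_t old_size,
                             uint32_t new_size) {
  assert(ref <= old_size);
  uint32_t updated = new_size + (ref - old_size);  // preserve the delta
  assert(updated <= new_size);
  return updated;
}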
 
 // -----------------------------------------------------------------------------
 // Implementation of Operand and MemOperand
diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h
index 3e8be7d..a9cf730 100644
--- a/src/ppc/assembler-ppc.h
+++ b/src/ppc/assembler-ppc.h
@@ -109,6 +109,8 @@
   V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
   V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
 
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
 #define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
   V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)         \
   V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d15)               \
@@ -238,6 +240,11 @@
   int reg_code;
 };
 
+typedef DoubleRegister FloatRegister;
+
+// TODO(ppc) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
 #define DECLARE_REGISTER(R) \
   const DoubleRegister R = {DoubleRegister::kCode_##R};
 DOUBLE_REGISTERS(DECLARE_REGISTER)
@@ -283,9 +290,6 @@
 const CRegister cr6 = {6};
 const CRegister cr7 = {7};
 
-// TODO(ppc) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
 // -----------------------------------------------------------------------------
 // Machine instruction Operands
 
@@ -1210,7 +1214,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   // Writes a single byte or word of data in the code stream.  Used
   // for inline tables, e.g., jump-tables.
diff --git a/src/ppc/builtins-ppc.cc b/src/ppc/builtins-ppc.cc
index 884afed..a6263cd 100644
--- a/src/ppc/builtins-ppc.cc
+++ b/src/ppc/builtins-ppc.cc
@@ -605,15 +605,10 @@
     // r3: number of arguments
     // r4: constructor function
     // r6: new target
-    if (is_api_function) {
-      __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-      Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(r3);
-      __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+
+    ParameterCount actual(r3);
+    __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -705,6 +700,150 @@
   Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the value to pass to the generator
+  //  -- r4 : the JSGeneratorObject to resume
+  //  -- r5 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(r4);
+
+  // Store input value into generator object.
+  __ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOffset), r0);
+  __ RecordWriteField(r4, JSGeneratorObject::kInputOffset, r3, r6,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
+
+  // Load suspended function and context.
+  __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
+  __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ mov(ip, Operand(step_in_enabled));
+  __ lbz(ip, MemOperand(ip));
+  __ cmpi(ip, Operand::Zero());
+  __ beq(&skip_flooding);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r5, r7);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r4, r5);
+    __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Push receiver.
+  __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+  __ Push(ip);
+
+  // ----------- S t a t e -------------
+  //  -- r4    : the JSGeneratorObject to resume
+  //  -- r5    : the resume mode (tagged)
+  //  -- r7    : generator function
+  //  -- cp    : generator context
+  //  -- lr    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      r3, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label loop, done_loop;
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+#if V8_TARGET_ARCH_PPC64
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&done_loop);
+#else
+    __ SmiUntag(r3, SetRC);
+    __ beq(&done_loop, cr0);
+#endif
+    __ mtctr(r3);
+    __ bind(&loop);
+    __ push(ip);
+    __ bdnz(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
+  __ bne(&old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ mr(r6, r4);
+    __ mr(r4, r7);
+    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+    __ JumpToJSEntry(ip);
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushStandardFrame(r7);
+
+    // Restore the operand stack.
+    __ LoadP(r3, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
+    __ LoadP(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
+    __ addi(r3, r3,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    {
+      Label loop, done_loop;
+      __ SmiUntag(r6, SetRC);
+      __ beq(&done_loop, cr0);
+      __ mtctr(r6);
+      __ bind(&loop);
+      __ LoadPU(ip, MemOperand(r3, kPointerSize));
+      __ Push(ip);
+      __ bdnz(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(ip, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset),
+              r0);
+
+    // Resume the generator function at the continuation.
+    __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
+    __ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+      if (FLAG_enable_embedded_constant_pool) {
+        __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r6);
+      }
+      __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+      __ SmiUntag(r5);
+      __ add(r6, r6, r5);
+      __ LoadSmiLiteral(r5,
+                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+      __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
+                r0);
+      __ mr(r3, r4);  // Continuation expects generator object in r3.
+      __ Jump(r6);
+    }
+  }
+}
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -847,14 +986,16 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(r4);
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into kInterpreterBytecodeRegister.
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   Label array_done;
   Register debug_info = r5;
@@ -870,8 +1011,13 @@
            FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ bind(&array_done);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ CompareRoot(kInterpreterBytecodeArrayRegister,
+                 Heap::kUndefinedValueRootIndex);
+  __ beq(&bytecode_array_not_present);
+
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
     __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
@@ -879,8 +1025,12 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
-  // Push new.target, bytecode array and zero for bytecode array offset.
-  __ li(r3, Operand::Zero());
+  // Load initial bytecode offset.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(r3, kInterpreterBytecodeOffsetRegister);
   __ Push(r6, kInterpreterBytecodeArrayRegister, r3);
 
   // Allocate the local and temporary register file on the stack.
@@ -911,18 +1061,8 @@
     __ bind(&no_args);
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load accumulator, register file, bytecode offset, dispatch table into
-  // registers.
+  // Load accumulator and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ addi(kInterpreterRegisterFileRegister, fp,
-          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
@@ -932,33 +1072,33 @@
                          kInterpreterBytecodeOffsetRegister));
   __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
   __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(ip);
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
-}
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
+  // The return value is in r3.
 
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in r3.
+  // Get the arguments + receiver count.
+  __ LoadP(r5, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ lwz(r5, FieldMemOperand(r5, BytecodeArray::kParameterSizeOffset));
 
   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
 
-  // Drop receiver + arguments and return.
-  __ lwz(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                             BytecodeArray::kParameterSizeOffset));
-  __ add(sp, sp, r0);
+  __ add(sp, sp, r5);
   __ blr();
+
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
+  __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(r4, r7, r8);
+  __ JumpToJSEntry(r7);
 }
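
The bytecode_array_not_present path "heals" the closure: once the SharedFunctionInfo no longer carries a BytecodeArray, the closure's code entry is redirected to the shared code object and execution re-enters through it. In rough C++ terms (types hypothetical, write barrier elided):

struct Code {};
struct SharedFunctionInfo { Code* code; };
struct JSFunction { SharedFunctionInfo* shared; Code* code_entry; };

// Point the closure at the shared code and hand it back so the caller can
// tail-call into it (the assembly uses RecordWriteCodeEntryField as well).
Code* HealClosure(JSFunction* closure) {
  Code* code = closure->shared->code;
  closure->code_entry = code;
  return code;
}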
 
 
@@ -973,7 +1113,6 @@
   __ bdnz(&loop);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -997,7 +1136,6 @@
           RelocInfo::CODE_TARGET);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -1022,25 +1160,25 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(r5, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
+                          Code::kHeaderSize - kHeapObjectTag));
+  __ mtlr(r0);
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register and dispatch table register.
-  __ addi(kInterpreterRegisterFileRegister, fp,
-          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Initialize the dispatch table register.
   __ mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
 
-  // Get the context from the frame.
-  __ LoadP(kContextRegister,
-           MemOperand(kInterpreterRegisterFileRegister,
-                      InterpreterFrameConstants::kContextFromRegisterPointer));
-
   // Get the bytecode array pointer from the frame.
-  __ LoadP(
-      kInterpreterBytecodeArrayRegister,
-      MemOperand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1053,9 +1191,7 @@
 
   // Get the target bytecode offset from the frame.
   __ LoadP(kInterpreterBytecodeOffsetRegister,
-           MemOperand(
-               kInterpreterRegisterFileRegister,
-               InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
   // Dispatch to the target bytecode.
@@ -1063,65 +1199,157 @@
                          kInterpreterBytecodeOffsetRegister));
   __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
   __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
-  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(ip);
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
-    __ Push(r4);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and pop the
-  // accumulator value into the accumulator register.
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ mov(r0,
-         Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
-  __ mtlr(r0);
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee)
+  //  -- r6 : new target (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  // -----------------------------------
+  // First, look up code; maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = r4;
+  Register map = r9;
+  Register index = r5;
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&gotta_call_runtime);
+
+  // Find literals.
+  // r10 : native context
+  // r5  : length / index
+  // r9  : optimized code map
+  // r6  : new target
+  // r4  : closure
+  Register native_context = r10;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r11;
+  Register array_pointer = r8;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ add(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ bne(&loop_bottom);
+  // OSR id set to none?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
+  __ bne(&loop_bottom);
+  // Literals available?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r7,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = r7;
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  // Store code entry in the closure.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r8);
+
+  // Link the closure into the optimized function list.
+  // r7 : code entry
+  // r10: native context
+  // r4 : closure
+  __ LoadP(
+      r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
+            r0);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, temp,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ StoreP(
+      closure,
+      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+  // Save closure before the write barrier.
+  __ mr(r8, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, r8, temp,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+
+  // We found neither literals nor code.
+  __ b(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ LoadP(entry,
+           FieldMemOperand(map, FixedArray::kHeaderSize +
+                                    SharedFunctionInfo::kSharedCodeIndex));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ b(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  // Is the full code valid?
+  __ LoadP(entry,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(r8);
+  __ cmpi(r8, Operand(Code::BUILTIN));
+  __ beq(&gotta_call_runtime);
+  // Yes, install the full code.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r8);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&gotta_call_runtime);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
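
The fast path above walks the SharedFunctionInfo's optimized code map from the back, looking for an entry whose native context matches and whose OSR id is BailoutId::None(); literals are installed first, then the cached code if its weak cell is still alive. A sketch of the walk with hypothetical C++ types (the real map is a FixedArray of kEntryLength-slot records):

#include <cstddef>

struct CodeMapEntry {
  void* native_context;  // weak; compared against the caller's context
  int osr_ast_id;        // must be BailoutId::None() for the lazy path
  void* literals;        // weak; nullptr once cleared
  void* code;            // weak; nullptr once cleared
};

// Returns the cached code, or nullptr if the runtime must compile. A null
// result with *literals_out unset corresponds to gotta_call_runtime above;
// a null result after setting *literals_out corresponds to maybe_call_runtime
// (the caller then tries the shared code entry).
void* FindCachedCode(const CodeMapEntry* entries, size_t count,
                     void* native_context, int bailout_id_none,
                     void** literals_out) {
  for (size_t i = count; i-- > 0;) {  // walk from the end, like loop_top
    const CodeMapEntry& e = entries[i];
    if (e.native_context != native_context) continue;
    if (e.osr_ast_id != bailout_id_none) continue;
    if (e.literals == nullptr) return nullptr;
    *literals_out = e.literals;
    return e.code;
  }
  return nullptr;
}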
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
@@ -1267,14 +1495,19 @@
   __ SmiUntag(r9);
   // Switch on the state.
   Label with_tos_register, unknown_state;
-  __ cmpi(r9, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ cmpi(
+      r9,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
   __ bne(&with_tos_register);
   __ addi(sp, sp, Operand(1 * kPointerSize));  // Remove state.
   __ Ret();
 
   __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
   __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
-  __ cmpi(r9, Operand(FullCodeGenerator::TOS_REG));
+  __ cmpi(
+      r9,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
   __ bne(&unknown_state);
   __ addi(sp, sp, Operand(2 * kPointerSize));  // Remove state.
   __ Ret();
@@ -1497,28 +1730,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r3    : argc
-  //  -- sp[0] : first argument (left-hand side)
-  //  -- sp[4] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ LoadP(InstanceOfDescriptor::LeftRegister(),
-             MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
-    __ LoadP(InstanceOfDescriptor::RightRegister(),
-             MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ Ret(2);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3    : argc
@@ -2460,6 +2671,30 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r4);
+  __ Push(r4);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r4);
+  __ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(r4, r5);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index 0671f99..f0f74c3 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -54,12 +54,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -72,11 +66,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1383,126 +1372,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = r4;              // Object (lhs).
-  Register const function = r3;            // Function (rhs).
-  Register const object_map = r5;          // Map of {object}.
-  Register const function_map = r6;        // Map of {function}.
-  Register const function_prototype = r7;  // Prototype of {function}.
-  Register const scratch = r8;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ bne(&fast_case);
-  __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-  __ bne(&fast_case);
-  __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
-  __ bne(&slow_case);
-  __ LoadRoot(r3, Heap::kFalseValueRootIndex);
-  __ Ret();
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
-  __ bne(&slow_case);
-
-  // Go to the runtime if the function is not a constructor.
-  __ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
-  __ TestBit(scratch, Map::kIsConstructor, r0);
-  __ beq(&slow_case, cr0);
-
-  // Ensure that {function} has an instance prototype.
-  __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
-  __ bne(&slow_case, cr0);
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ LoadP(function_prototype,
-           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
-  __ bne(&function_prototype_valid);
-  __ LoadP(function_prototype,
-           FieldMemOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Register const object_instance_type = function_map;
-  Register const map_bit_field = function_map;
-  Register const null = scratch;
-  Register const result = r3;
-
-  Label done, loop, fast_runtime_fallback;
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ LoadRoot(null, Heap::kNullValueRootIndex);
-  __ bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ lbz(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
-  __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
-  __ bne(&fast_runtime_fallback, cr0);
-  // Check if the current object is a Proxy.
-  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
-  __ beq(&fast_runtime_fallback);
-
-  __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object, function_prototype);
-  __ beq(&done);
-  __ cmp(object, null);
-  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ bne(&loop);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-  __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // Found Proxy or access check needed: Call the runtime
-  __ bind(&fast_runtime_fallback);
-  __ Push(object, function_prototype);
-  // Invalidate the instanceof cache.
-  __ LoadSmiLiteral(scratch, Smi::FromInt(0));
-  __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ Push(object, function);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
@@ -3960,8 +3829,8 @@
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ bne(&miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, feedback,
                                                receiver_map, scratch1, r10);
@@ -4105,8 +3974,8 @@
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ bne(&miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
       scratch1, scratch2);
@@ -4698,15 +4567,15 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ StoreP(r5, MemOperand(r3, JSObject::kMapOffset));
+  __ StoreP(r5, FieldMemOperand(r3, JSObject::kMapOffset), r0);
   __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r6, MemOperand(r3, JSObject::kPropertiesOffset));
-  __ StoreP(r6, MemOperand(r3, JSObject::kElementsOffset));
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ addi(r4, r3, Operand(JSObject::kHeaderSize));
+  __ addi(r4, r3, Operand(JSObject::kHeaderSize - kHeapObjectTag));
 
   // ----------- S t a t e -------------
-  //  -- r3 : result (untagged)
+  //  -- r3 : result (tagged)
   //  -- r4 : result fields (untagged)
   //  -- r8 : result end (untagged)
   //  -- r5 : initial map
@@ -4724,9 +4593,6 @@
   {
     // Initialize all in-object fields with undefined.
     __ InitializeFieldsWithFiller(r4, r8, r9);
-
-    // Add the object tag to make the JSObject real.
-    __ addi(r3, r3, Operand(kHeapObjectTag));
     __ Ret();
   }
   __ bind(&slack_tracking);
@@ -4746,9 +4612,6 @@
     __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
     __ InitializeFieldsWithFiller(r4, r8, r9);
 
-    // Add the object tag to make the JSObject real.
-    __ addi(r3, r3, Operand(kHeapObjectTag));
-
     // Check if we can finalize the instance size.
     __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
     __ Ret(ne);
@@ -4774,10 +4637,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(r5);
   }
-  __ subi(r3, r3, Operand(kHeapObjectTag));
   __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
   __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
   __ add(r8, r3, r8);
+  __ subi(r8, r8, Operand(kHeapObjectTag));
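+  // r3 stays tagged now; untag only the computed end so that r8 matches the
+  // untagged result end expected at done_allocate.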
   __ b(&done_allocate);
 
   // Fall back to %NewObject.
@@ -4795,20 +4658,20 @@
   // -----------------------------------
   __ AssertFunction(r4);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make r5 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mr(r5, fp);
-    __ b(&loop_entry);
-    __ bind(&loop);
+  // Make r5 point to the JavaScript frame.
+  __ mr(r5, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r4);
-    __ bne(&loop);
+    __ beq(&ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have rest parameters (only possible if we have an
@@ -4842,7 +4705,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
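+    // Allocate() now always tags the result, so TAG_OBJECT is gone and call
+    // sites pass NO_ALLOCATION_FLAGS instead.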
     __ bind(&done_allocate);
 
     // Setup the rest parameter array in r3.
@@ -4885,7 +4748,7 @@
     Label allocate, done_allocate;
     __ mov(r4, Operand(JSArray::kSize + FixedArray::kHeaderSize));
     __ add(r4, r4, r9);
-    __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+    __ Allocate(r4, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in r6.
@@ -4939,6 +4802,23 @@
   // -----------------------------------
   __ AssertFunction(r4);
 
+  // Make r10 point to the JavaScript frame.
+  __ mr(r10, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ LoadP(r10, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ LoadP(ip, MemOperand(r10, StandardFrameConstants::kFunctionOffset));
+    __ cmp(ip, r4);
+    __ beq(&ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   __ LoadWordArith(
@@ -4947,19 +4827,20 @@
   __ SmiTag(r5);
 #endif
   __ SmiToPtrArrayOffset(r6, r5);
-  __ add(r6, fp, r6);
+  __ add(r6, r10, r6);
   __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
 
   // r4 : function
   // r5 : number of parameters (tagged)
   // r6 : parameters pointer
+  // r10 : JavaScript frame pointer
   // Registers used over whole function:
   // r8 : arguments count (tagged)
   // r9 : mapped parameter count (tagged)
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r7, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
   __ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ beq(&adaptor_frame);
@@ -5022,7 +4903,7 @@
   __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
+  __ Allocate(r11, r3, r11, r7, &runtime, NO_ALLOCATION_FLAGS);
 
   // r3 = address of new object(s) (tagged)
   // r5 = argument count (smi-tagged)
@@ -5191,20 +5072,20 @@
   // -----------------------------------
   __ AssertFunction(r4);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make r5 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mr(r5, fp);
-    __ b(&loop_entry);
-    __ bind(&loop);
+  // Make r5 point to the JavaScript frame.
+  __ mr(r5, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
     __ cmp(ip, r4);
-    __ bne(&loop);
+    __ beq(&ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -5246,7 +5127,7 @@
   Label allocate, done_allocate;
   __ mov(r4, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
   __ add(r4, r4, r9);
-  __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+  __ Allocate(r4, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in r6.
@@ -5623,7 +5504,11 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
+
+  // new target
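+  // (undefined, signalling a non-construct invocation; see the
+  // kNewTargetIndex assert above)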
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
 
   // context save
   __ push(context);
@@ -5659,10 +5544,10 @@
   // it's not controlled by GC.
   // PPC LINUX ABI:
   //
-  // Create 5 extra slots on stack:
+  // Create 4 extra slots on stack:
   //    [0] space for DirectCEntryStub's LR save
-  //    [1-4] FunctionCallbackInfo
-  const int kApiStackSpace = 5;
+  //    [1-3] FunctionCallbackInfo
+  const int kApiStackSpace = 4;
   const int kFunctionCallbackInfoOffset =
       (kStackFrameExtraParamSlot + 1) * kPointerSize;
 
@@ -5681,9 +5566,6 @@
   // FunctionCallbackInfo::length_ = argc
   __ li(ip, Operand(argc()));
   __ stw(ip, MemOperand(r3, 2 * kPointerSize));
-  // FunctionCallbackInfo::is_construct_call_ = 0
-  __ li(ip, Operand::Zero());
-  __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5700,9 +5582,9 @@
   }
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  MemOperand is_construct_call_operand =
-      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
-  MemOperand* stack_space_operand = &is_construct_call_operand;
+  MemOperand length_operand =
+      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
+  MemOperand* stack_space_operand = &length_operand;
   stack_space = argc() + FCA::kArgsLength + 1;
   stack_space_operand = NULL;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@@ -5712,18 +5594,39 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- sp[0]                        : name
-  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- r5                           : api_function_address
-  // -----------------------------------
-
-  Register api_function_address = ApiGetterDescriptor::function_address();
   int arg0Slot = 0;
   int accessorInfoSlot = 0;
   int apiStackSpace = 0;
-  DCHECK(api_function_address.is(r5));
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = r7;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+  Register api_function_address = r5;
+
+  __ push(receiver);
+  // Push data from AccessorInfo.
+  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ push(scratch);
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Push(scratch, scratch);
+  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch, holder);
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ push(scratch);
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5771,6 +5674,10 @@
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+  __ LoadP(api_function_address,
+           FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
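+  // The C++ getter entry point now comes from the AccessorInfo's js_getter
+  // Foreign rather than from a descriptor register.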
+
   // +3 is to skip prolog, return address and name handle.
   MemOperand return_value_operand(
       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5778,7 +5685,6 @@
                            kStackUnwindSpace, NULL, return_value_operand, NULL);
 }
 
-
 #undef __
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc
index 5642e91..2139d87 100644
--- a/src/ppc/codegen-ppc.cc
+++ b/src/ppc/codegen-ppc.cc
@@ -185,6 +185,7 @@
   __ SmiToDoubleArrayOffset(scratch3, length);
   __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
   __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
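+  // Allocate() now returns a tagged pointer; strip the tag since the
+  // conversion code below works with raw addresses.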
+  __ subi(array, array, Operand(kHeapObjectTag));
   // array: destination FixedDoubleArray, not tagged as heap object.
   // elements: source FixedArray.
 
@@ -313,12 +314,12 @@
   __ add(array_size, array_size, r0);
   __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
               NO_ALLOCATION_FLAGS);
-  // array: destination FixedArray, not tagged as heap object
+  // array: destination FixedArray, tagged as heap object
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
-  __ addi(array, array, Operand(kHeapObjectTag));
+  __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
+            r0);
+  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
 
   // Prepare for conversion loop.
   Register src_elements = elements;
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index 48b6cdc..6426316 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -46,16 +46,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return r4; }
-const Register InstanceOfDescriptor::RightRegister() { return r3; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return r4; }
 const Register StringCompareDescriptor::RightRegister() { return r3; }
 
-
-const Register ApiGetterDescriptor::function_address() { return r5; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return r3; }
+const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
 
 const Register MathPowTaggedDescriptor::exponent() { return r5; }
 
@@ -68,6 +63,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return r3; }
+const Register HasPropertyDescriptor::KeyRegister() { return r6; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -244,13 +241,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
+  // register state
+  // r3 -- number of arguments
+  // r4 -- function
+  // r5 -- allocation site with elements kind
+  Register registers[] = {r4, r5, r3};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -314,6 +314,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -374,9 +379,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
-      kInterpreterDispatchTableRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -410,6 +414,16 @@
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
+
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r3,  // the value to pass to the generator
+      r4,  // the JSGeneratorObject to resume
+      r5   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 42e5a13..0f5f3a7 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -1790,6 +1790,7 @@
                               Register scratch1, Register scratch2,
                               Label* gc_required, AllocationFlags flags) {
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1875,18 +1876,21 @@
     blt(gc_required);
     add(result_end, result, result_end);
   }
-  StoreP(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    addi(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    StoreP(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  addi(result, result, Operand(kHeapObjectTag));
 }
 
 
 void MacroAssembler::Allocate(Register object_size, Register result,
                               Register result_end, Register scratch,
                               Label* gc_required, AllocationFlags flags) {
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1976,12 +1980,114 @@
     andi(r0, result_end, Operand(kObjectAlignmentMask));
     Check(eq, kUnalignedAllocationInNewSpace, cr0);
   }
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    StoreP(result_end, MemOperand(top_address));
+  }
+
+  // Tag object.
+  addi(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, Register scratch,
+                                  AllocationFlags flags) {
+  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+  // is not specified. Other registers must not overlap.
+  DCHECK(!AreAliased(object_size, result, scratch, ip));
+  DCHECK(!AreAliased(result_end, result, scratch, ip));
+  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  Register top_address = scratch;
+  mov(top_address, Operand(allocation_top));
+  LoadP(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    andi(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned);
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    stw(result_end, MemOperand(result));
+    addi(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top using result. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
+    add(result_end, result, result_end);
+  } else {
+    add(result_end, result, object_size);
+  }
+
+  // Update allocation top. result_end holds the new top.
+  if (emit_debug_code()) {
+    andi(r0, result_end, Operand(kObjectAlignmentMask));
+    Check(eq, kUnalignedAllocationInNewSpace, cr0);
+  }
   StoreP(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    addi(result, result, Operand(kHeapObjectTag));
+  // Tag object.
+  addi(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register scratch1, Register scratch2,
+                                  AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
   }
+  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address register.
+  Register top_address = scratch1;
+  Register result_end = scratch2;
+  mov(top_address, Operand(allocation_top));
+  LoadP(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    andi(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned);
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    stw(result_end, MemOperand(result));
+    addi(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top using result.
+  Add(result_end, result, object_size, r0);
+
+  // Update allocation top; folded allocations always bump the top pointer.
+  StoreP(result_end, MemOperand(top_address));
+
+  // Tag object.
+  addi(result, result, Operand(kHeapObjectTag));
 }
 
 
@@ -1999,7 +2105,8 @@
   and_(scratch1, scratch1, r0);
 
   // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
@@ -2021,7 +2128,8 @@
   and_(scratch1, scratch1, r0);
 
   // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -2034,7 +2142,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
                       scratch2);
@@ -2046,7 +2154,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -2059,7 +2167,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
                       scratch2);
@@ -2072,7 +2180,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -2994,6 +3102,18 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object, r0);
+    Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+    push(object);
+    CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+    pop(object);
+    Check(eq, kOperandIsNotAGeneratorObject);
+  }
+}
+
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
@@ -3087,12 +3207,11 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* gc_required,
-                                        TaggingMode tagging_mode,
                                         MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
-           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+           NO_ALLOCATION_FLAGS);
 
   Heap::RootListIndex map_index = mode == MUTABLE
                                       ? Heap::kMutableHeapNumberMapRootIndex
@@ -3100,12 +3219,8 @@
   AssertIsRoot(heap_number_map, map_index);
 
   // Store heap number map in the allocated object.
-  if (tagging_mode == TAG_RESULT) {
-    StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
-           r0);
-  } else {
-    StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
-  }
+  StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
+         r0);
 }
 
 
@@ -3126,7 +3241,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -4202,11 +4318,7 @@
     /* cannot use d-form */
     DCHECK(!scratch.is(no_reg));
     mov(scratch, Operand(offset));
-#if V8_TARGET_ARCH_PPC64
-    ldx(dst, MemOperand(mem.ra(), scratch));
-#else
-    lwzx(dst, MemOperand(mem.ra(), scratch));
-#endif
+    LoadPX(dst, MemOperand(mem.ra(), scratch));
   } else {
 #if V8_TARGET_ARCH_PPC64
     int misaligned = (offset & 3);
@@ -4225,6 +4337,23 @@
   }
 }
 
+void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
+                            Register scratch) {
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    /* cannot use d-form */
+    DCHECK(!scratch.is(no_reg));
+    mov(scratch, Operand(offset));
+    LoadPUX(dst, MemOperand(mem.ra(), scratch));
+  } else {
+#if V8_TARGET_ARCH_PPC64
+    ldu(dst, mem);
+#else
+    lwzu(dst, mem);
+#endif
+  }
+}
 
 // Store a "pointer" sized value to the memory location
 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
@@ -4235,11 +4364,7 @@
     /* cannot use d-form */
     DCHECK(!scratch.is(no_reg));
     mov(scratch, Operand(offset));
-#if V8_TARGET_ARCH_PPC64
-    stdx(src, MemOperand(mem.ra(), scratch));
-#else
-    stwx(src, MemOperand(mem.ra(), scratch));
-#endif
+    StorePX(src, MemOperand(mem.ra(), scratch));
   } else {
 #if V8_TARGET_ARCH_PPC64
     int misaligned = (offset & 3);
@@ -4263,6 +4388,24 @@
   }
 }
 
+void MacroAssembler::StorePU(Register src, const MemOperand& mem,
+                             Register scratch) {
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    /* cannot use d-form */
+    DCHECK(!scratch.is(no_reg));
+    mov(scratch, Operand(offset));
+    StorePUX(src, MemOperand(mem.ra(), scratch));
+  } else {
+#if V8_TARGET_ARCH_PPC64
+    stdu(src, mem);
+#else
+    stwu(src, mem);
+#endif
+  }
+}
+
 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
                                    Register scratch) {
   int offset = mem.offset();
@@ -4457,6 +4600,44 @@
   }
 }
 
+void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
+                                 Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    mov(scratch, Operand(offset));
+    lfdux(dst, MemOperand(base, scratch));
+  } else {
+    lfdu(dst, mem);
+  }
+}
+
+void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
+                                Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    mov(scratch, Operand(offset));
+    lfsx(dst, MemOperand(base, scratch));
+  } else {
+    lfs(dst, mem);
+  }
+}
+
+void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
+                                 Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    mov(scratch, Operand(offset));
+    lfsux(dst, MemOperand(base, scratch));
+  } else {
+    lfsu(dst, mem);
+  }
+}
 
 void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
                                  Register scratch) {
@@ -4471,13 +4652,52 @@
   }
 }
 
+void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
+                                  Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    mov(scratch, Operand(offset));
+    stfdux(src, MemOperand(base, scratch));
+  } else {
+    stfdu(src, mem);
+  }
+}
+
+void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
+                                 Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    mov(scratch, Operand(offset));
+    stfsx(src, MemOperand(base, scratch));
+  } else {
+    stfs(src, mem);
+  }
+}
+
+void MacroAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
+                                  Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    mov(scratch, Operand(offset));
+    stfsux(src, MemOperand(base, scratch));
+  } else {
+    stfsu(src, mem);
+  }
+}
+
 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
                                                      Register scratch_reg,
                                                      Register scratch2_reg,
                                                      Label* no_memento_found) {
   Label map_check;
   Label top_check;
-  ExternalReference new_space_allocation_top =
+  ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -4494,7 +4714,9 @@
 
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  Xor(r0, scratch_reg, Operand(new_space_allocation_top));
+  mov(ip, Operand(new_space_allocation_top_adr));
+  LoadP(ip, MemOperand(ip));
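+  // ip now holds the current allocation top; it is reused by the top_check
+  // comparison below.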
+  Xor(r0, scratch_reg, Operand(ip));
   and_(r0, r0, mask, SetRC);
   beq(&top_check, cr0);
   // The object is on a different page than allocation top. Bail out if the
@@ -4508,7 +4730,7 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  Cmpi(scratch_reg, Operand(new_space_allocation_top), r0);
+  cmp(scratch_reg, ip);
   bgt(no_memento_found);
   // Memento map check.
   bind(&map_check);
diff --git a/src/ppc/macro-assembler-ppc.h b/src/ppc/macro-assembler-ppc.h
index a529b62..0d5df2f 100644
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -19,8 +19,8 @@
 const Register kReturnRegister2 = {Register::kCode_r5};
 const Register kJSFunctionRegister = {Register::kCode_r4};
 const Register kContextRegister = {Register::kCode_r30};
+const Register kAllocateSizeRegister = {Register::kCode_r4};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r14};
 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
 const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
@@ -73,10 +73,8 @@
 
 // These exist to provide portability between 32 and 64bit
 #if V8_TARGET_ARCH_PPC64
-#define LoadPU ldu
 #define LoadPX ldx
 #define LoadPUX ldux
-#define StorePU stdu
 #define StorePX stdx
 #define StorePUX stdux
 #define ShiftLeftImm sldi
@@ -90,10 +88,8 @@
 #define Mul mulld
 #define Div divd
 #else
-#define LoadPU lwzu
 #define LoadPX lwzx
 #define LoadPUX lwzux
-#define StorePU stwu
 #define StorePX stwx
 #define StorePUX stwux
 #define ShiftLeftImm slwi
@@ -515,8 +511,25 @@
   void StoreRepresentation(Register src, const MemOperand& mem,
                            Representation r, Register scratch = no_reg);
 
-  void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
-  void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
+  void LoadDouble(DoubleRegister dst, const MemOperand& mem,
+                  Register scratch = no_reg);
+  void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
+                   Register scratch = no_reg);
+
+  void LoadSingle(DoubleRegister dst, const MemOperand& mem,
+                  Register scratch = no_reg);
+  void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
+                   Register scratch = no_reg);
+
+  void StoreDouble(DoubleRegister src, const MemOperand& mem,
+                   Register scratch = no_reg);
+  void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
+                    Register scratch = no_reg);
+
+  void StoreSingle(DoubleRegister src, const MemOperand& mem,
+                   Register scratch = no_reg);
+  void StoreSingleU(DoubleRegister src, const MemOperand& mem,
+                    Register scratch = no_reg);
 
   // Move values between integer and floating point registers.
   void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
@@ -573,7 +586,9 @@
 
   // These exist to provide portability between 32 and 64bit
   void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+  void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
   void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+  void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
 
   // ---------------------------------------------------------------------------
   // JavaScript invokes
@@ -696,6 +711,15 @@
   void Allocate(Register object_size, Register result, Register result_end,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
+  // FastAllocate is right now only used for folded allocations. It just
+  // increments the top pointer without checking against limit. This can only
+  // be done if it was proved earlier that the allocation will succeed.
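+  // It pairs with an earlier Allocate() call carrying
+  // ALLOCATION_FOLDING_DOMINATOR, which checks the limit for the whole
+  // folded group but leaves the top pointer untouched.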
+  void FastAllocate(int object_size, Register result, Register scratch1,
+                    Register scratch2, AllocationFlags flags);
+
+  void FastAllocate(Register object_size, Register result, Register result_end,
+                    Register scratch, AllocationFlags flags);
+
   void AllocateTwoByteString(Register result, Register length,
                              Register scratch1, Register scratch2,
                              Register scratch3, Label* gc_required);
@@ -720,7 +744,6 @@
   // when control continues at the gc_required label.
   void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                           Register heap_number_map, Label* gc_required,
-                          TaggingMode tagging_mode = TAG_RESULT,
                           MutableMode mode = IMMUTABLE);
   void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
                                    Register scratch1, Register scratch2,
@@ -1380,6 +1403,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
diff --git a/src/profiler/allocation-tracker.cc b/src/profiler/allocation-tracker.cc
index 791cdf0..6acd191 100644
--- a/src/profiler/allocation-tracker.cc
+++ b/src/profiler/allocation-tracker.cc
@@ -149,8 +149,7 @@
 
 
 void AddressToTraceMap::Print() {
-  PrintF("[AddressToTraceMap (%" V8_SIZET_PREFIX V8PRIuPTR "): \n",
-         ranges_.size());
+  PrintF("[AddressToTraceMap (%" PRIuS "): \n", ranges_.size());
   for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
     PrintF("[%p - %p] => %u\n", it->second.start, it->first,
         it->second.trace_node_id);
@@ -231,7 +230,7 @@
 
   Isolate* isolate = heap->isolate();
   int length = 0;
-  StackTraceFrameIterator it(isolate);
+  JavaScriptFrameIterator it(isolate);
   while (!it.done() && length < kMaxAllocationTraceLength) {
     JavaScriptFrame* frame = it.frame();
     SharedFunctionInfo* shared = frame->function()->shared();
@@ -307,9 +306,8 @@
       info_(info) {
   script_ = Handle<Script>::cast(
       script->GetIsolate()->global_handles()->Create(script));
-  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
-                          this,
-                          &HandleWeakScript);
+  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()), this,
+                          &HandleWeakScript, v8::WeakCallbackType::kParameter);
 }
 
 
@@ -327,9 +325,8 @@
   info_->column = Script::GetColumnNumber(script_, start_position_);
 }
 
-
 void AllocationTracker::UnresolvedLocation::HandleWeakScript(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
+    const v8::WeakCallbackInfo<void>& data) {
   UnresolvedLocation* loc =
       reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
   GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
diff --git a/src/profiler/allocation-tracker.h b/src/profiler/allocation-tracker.h
index 03802a5..dbcf4a7 100644
--- a/src/profiler/allocation-tracker.h
+++ b/src/profiler/allocation-tracker.h
@@ -129,8 +129,7 @@
     void Resolve();
 
    private:
-    static void HandleWeakScript(
-        const v8::WeakCallbackData<v8::Value, void>& data);
+    static void HandleWeakScript(const v8::WeakCallbackInfo<void>& data);
 
     Handle<Script> script_;
     int start_position_;
diff --git a/src/profiler/cpu-profiler-inl.h b/src/profiler/cpu-profiler-inl.h
index 45e4ccf..d8c9c90 100644
--- a/src/profiler/cpu-profiler-inl.h
+++ b/src/profiler/cpu-profiler-inl.h
@@ -35,7 +35,7 @@
 
 void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
   CodeEntry* entry = code_map->FindEntry(start);
-  if (entry != NULL) entry->set_deopt_info(deopt_reason, position, pc_offset);
+  if (entry != NULL) entry->set_deopt_info(deopt_reason, position, deopt_id);
 }
 
 
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index 47585b7..5e4a444 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -242,7 +242,7 @@
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   AbstractCode* code,
                                   SharedFunctionInfo* shared,
-                                  CompilationInfo* info, Name* script_name) {
+                                  Name* script_name) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = code->address();
@@ -253,9 +253,6 @@
       CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
       NULL, code->instruction_start());
   RecordInliningInfo(rec->entry, code);
-  if (info) {
-    rec->entry->set_inlined_function_infos(info->inlined_function_infos());
-  }
   rec->entry->FillFunctionInfo(shared);
   rec->size = code->ExecutableSize();
   processor_->Enqueue(evt_rec);
@@ -263,8 +260,7 @@
 
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   AbstractCode* abstract_code,
-                                  SharedFunctionInfo* shared,
-                                  CompilationInfo* info, Name* script_name,
+                                  SharedFunctionInfo* shared, Name* script_name,
                                   int line, int column) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -310,9 +306,7 @@
       profiles_->GetName(InferScriptName(script_name, shared)), line, column,
       line_table, abstract_code->instruction_start());
   RecordInliningInfo(rec->entry, abstract_code);
-  if (info) {
-    rec->entry->set_inlined_function_infos(info->inlined_function_infos());
-  }
+  RecordDeoptInlinedFrames(rec->entry, abstract_code);
   rec->entry->FillFunctionInfo(shared);
   rec->size = abstract_code->ExecutableSize();
   processor_->Enqueue(evt_rec);
@@ -356,7 +350,7 @@
   rec->start = code->address();
   rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
   rec->position = info.position;
-  rec->pc_offset = pc - code->instruction_start();
+  rec->deopt_id = info.deopt_id;
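+  // Deopt events are now keyed by deopt id rather than by pc offset.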
   processor_->Enqueue(evt_rec);
   processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
 }
@@ -453,6 +447,54 @@
   }
 }
 
+void CpuProfiler::RecordDeoptInlinedFrames(CodeEntry* entry,
+                                           AbstractCode* abstract_code) {
+  if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
+  Code* code = abstract_code->GetCode();
+  DeoptimizationInputData* deopt_input_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
+  for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
+    RelocInfo* reloc_info = rit.rinfo();
+    DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
+    int deopt_id = static_cast<int>(reloc_info->data());
+    int translation_index =
+        deopt_input_data->TranslationIndex(deopt_id)->value();
+    TranslationIterator it(deopt_input_data->TranslationByteArray(),
+                           translation_index);
+    Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+    DCHECK_EQ(Translation::BEGIN, opcode);
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+    std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
+    while (it.HasNext() &&
+           Translation::BEGIN !=
+               (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+      if (opcode != Translation::JS_FRAME &&
+          opcode != Translation::INTERPRETED_FRAME) {
+        it.Skip(Translation::NumberOfOperandsFor(opcode));
+        continue;
+      }
+      BailoutId ast_id = BailoutId(it.Next());
+      int shared_info_id = it.Next();
+      it.Next();  // Skip height
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(
+          deopt_input_data->LiteralArray()->get(shared_info_id));
+      int source_position = Deoptimizer::ComputeSourcePosition(shared, ast_id);
+      int script_id = v8::UnboundScript::kNoScriptId;
+      if (shared->script()->IsScript()) {
+        Script* script = Script::cast(shared->script());
+        script_id = script->id();
+      }
+      CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
+      inlined_frames.push_back(frame);
+    }
+    if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
+      entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
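+      // AddDeoptInlinedFrames is expected to take ownership of the vector,
+      // leaving it empty; the DCHECK below relies on that.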
+      DCHECK(inlined_frames.empty());
+    }
+  }
+}
+
 CpuProfiler::CpuProfiler(Isolate* isolate)
     : isolate_(isolate),
       sampling_interval_(base::TimeDelta::FromMicroseconds(
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index a04ee3c..ed1e15f 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -6,13 +6,14 @@
 #define V8_PROFILER_CPU_PROFILER_H_
 
 #include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/platform/time.h"
 #include "src/compiler.h"
 #include "src/locked-queue.h"
 #include "src/profiler/circular-queue.h"
 #include "src/profiler/sampler.h"
+#include "src/profiler/tick-sample.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +21,6 @@
 // Forward declarations.
 class CodeEntry;
 class CodeMap;
-class CompilationInfo;
 class CpuProfile;
 class CpuProfilesCollection;
 class ProfileGenerator;
@@ -81,7 +81,7 @@
   Address start;
   const char* deopt_reason;
   SourcePosition position;
-  size_t pc_offset;
+  int deopt_id;
 
   INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
@@ -176,7 +176,7 @@
   SamplingCircularQueue<TickSampleEventRecord,
                         kTickSampleQueueLength> ticks_buffer_;
   LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
-  AtomicNumber<unsigned> last_code_event_id_;
+  base::AtomicNumber<unsigned> last_code_event_id_;
   unsigned last_processed_code_event_id_;
 };
 
@@ -226,11 +226,10 @@
   void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                        Name* name) override;
   void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, CompilationInfo* info,
-                       Name* script_name) override;
+                       SharedFunctionInfo* shared, Name* script_name) override;
   void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
-                       SharedFunctionInfo* shared, CompilationInfo* info,
-                       Name* script_name, int line, int column) override;
+                       SharedFunctionInfo* shared, Name* script_name, int line,
+                       int column) override;
   void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
                        int args_count) override;
   void CodeMovingGCEvent() override {}
@@ -259,6 +258,7 @@
   void ResetProfiles();
   void LogBuiltins();
   void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
+  void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
   Name* InferScriptName(Name* name, SharedFunctionInfo* info);
 
   Isolate* isolate_;
diff --git a/src/profiler/heap-profiler.cc b/src/profiler/heap-profiler.cc
index 1305cae..e048fae 100644
--- a/src/profiler/heap-profiler.cc
+++ b/src/profiler/heap-profiler.cc
@@ -84,14 +84,14 @@
   return result;
 }
 
-
-bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
-                                             int stack_depth) {
+bool HeapProfiler::StartSamplingHeapProfiler(
+    uint64_t sample_interval, int stack_depth,
+    v8::HeapProfiler::SamplingFlags flags) {
   if (sampling_heap_profiler_.get()) {
     return false;
   }
   sampling_heap_profiler_.Reset(new SamplingHeapProfiler(
-      heap(), names_.get(), sample_interval, stack_depth));
+      heap(), names_.get(), sample_interval, stack_depth, flags));
   return true;
 }
 
diff --git a/src/profiler/heap-profiler.h b/src/profiler/heap-profiler.h
index 32e143c..93cb57a 100644
--- a/src/profiler/heap-profiler.h
+++ b/src/profiler/heap-profiler.h
@@ -30,7 +30,8 @@
       v8::ActivityControl* control,
       v8::HeapProfiler::ObjectNameResolver* resolver);
 
-  bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth);
+  bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth,
+                                 v8::HeapProfiler::SamplingFlags);
   void StopSamplingHeapProfiler();
   bool is_sampling_allocations() { return !sampling_heap_profiler_.is_empty(); }
   AllocationProfile* GetAllocationProfile();
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index 748f307..e67acef 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -80,7 +80,7 @@
 void HeapEntry::Print(
     const char* prefix, const char* edge_name, int max_depth, int indent) {
   STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
-  base::OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", self_size(), id(), indent,
+  base::OS::Print("%6" PRIuS " @%6u %*c %s%s: ", self_size(), id(), indent,
                   ' ', prefix, edge_name);
   if (type() != kString) {
     base::OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -1058,8 +1058,6 @@
     ExtractAccessorInfoReferences(entry, AccessorInfo::cast(obj));
   } else if (obj->IsAccessorPair()) {
     ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
-  } else if (obj->IsCodeCache()) {
-    ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
   } else if (obj->IsCode()) {
     ExtractCodeReferences(entry, Code::cast(obj));
   } else if (obj->IsBox()) {
@@ -1444,19 +1442,6 @@
 }
 
 
-void V8HeapExplorer::ExtractCodeCacheReferences(
-    int entry, CodeCache* code_cache) {
-  TagObject(code_cache->default_cache(), "(default code cache)");
-  SetInternalReference(code_cache, entry,
-                       "default_cache", code_cache->default_cache(),
-                       CodeCache::kDefaultCacheOffset);
-  TagObject(code_cache->normal_type_cache(), "(code type cache)");
-  SetInternalReference(code_cache, entry,
-                       "type_cache", code_cache->normal_type_cache(),
-                       CodeCache::kNormalTypeCacheOffset);
-}
-
-
 void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
   TagObject(code, names_->GetFormatted("(%s builtin)", name));
 }
@@ -1598,14 +1583,8 @@
           int field_offset =
               field_index.is_inobject() ? field_index.offset() : -1;
 
-          if (k != heap_->hidden_properties_symbol()) {
-            SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
-                                               value, NULL, field_offset);
-          } else {
-            TagObject(value, "(hidden properties)");
-            SetInternalReference(js_obj, entry, "hidden_properties", value,
-                                 field_offset);
-          }
+          SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
+                                             value, NULL, field_offset);
           break;
         }
         case kDescriptor:
@@ -1625,11 +1604,6 @@
         DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
         PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
         Object* value = cell->value();
-        if (k == heap_->hidden_properties_symbol()) {
-          TagObject(value, "(hidden properties)");
-          SetInternalReference(js_obj, entry, "hidden_properties", value);
-          continue;
-        }
         PropertyDetails details = cell->property_details();
         SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
                                            Name::cast(k), value);
@@ -1642,11 +1616,6 @@
       Object* k = dictionary->KeyAt(i);
       if (dictionary->IsKey(k)) {
         Object* value = dictionary->ValueAt(i);
-        if (k == heap_->hidden_properties_symbol()) {
-          TagObject(value, "(hidden properties)");
-          SetInternalReference(js_obj, entry, "hidden_properties", value);
-          continue;
-        }
         PropertyDetails details = dictionary->DetailsAt(i);
         SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
                                            Name::cast(k), value);
@@ -2262,9 +2231,9 @@
   intptr_t elements = info->GetElementCount();
   intptr_t size = info->GetSizeInBytes();
   const char* name = elements != -1
-      ? names_->GetFormatted(
-            "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
-      : names_->GetCopy(info->GetLabel());
+                         ? names_->GetFormatted("%s / %" V8PRIdPTR " entries",
+                                                info->GetLabel(), elements)
+                         : names_->GetCopy(info->GetLabel());
   return snapshot_->AddEntry(
       entries_type_,
       name,
diff --git a/src/profiler/heap-snapshot-generator.h b/src/profiler/heap-snapshot-generator.h
index 857f240..255f61d 100644
--- a/src/profiler/heap-snapshot-generator.h
+++ b/src/profiler/heap-snapshot-generator.h
@@ -382,7 +382,6 @@
   void ExtractScriptReferences(int entry, Script* script);
   void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
   void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
-  void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
   void ExtractCodeReferences(int entry, Code* code);
   void ExtractBoxReferences(int entry, Box* box);
   void ExtractCellReferences(int entry, Cell* cell);
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index abcd9e5..b07601f 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -5,11 +5,12 @@
 #include "src/profiler/profile-generator.h"
 
 #include "src/ast/scopeinfo.h"
+#include "src/base/adapters.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
 #include "src/profiler/profile-generator-inl.h"
-#include "src/profiler/sampler.h"
+#include "src/profiler/tick-sample.h"
 #include "src/splay-tree-inl.h"
 #include "src/unicode.h"
 
@@ -118,6 +119,19 @@
   return it != inline_locations_.end() ? &it->second : NULL;
 }
 
+void CodeEntry::AddDeoptInlinedFrames(
+    int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
+  // It's better to use std::move to place the vector into the map,
+  // but it's not supported by the current libstdc++ on MacOS.
+  deopt_inlined_frames_
+      .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
+      .first->second.swap(inlined_frames);
+}
+
+bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
+  return deopt_inlined_frames_.find(deopt_id) != deopt_inlined_frames_.end();
+}
+
 void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
   if (!shared->script()->IsScript()) return;
   Script* script = Script::cast(shared->script());
@@ -131,30 +145,20 @@
 
   CpuProfileDeoptInfo info;
   info.deopt_reason = deopt_reason_;
-  if (inlined_function_infos_.empty()) {
+  DCHECK_NE(Deoptimizer::DeoptInfo::kNoDeoptId, deopt_id_);
+  if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
     info.stack.push_back(CpuProfileDeoptFrame(
         {script_id_, position_ + deopt_position_.position()}));
-    return info;
-  }
-  // Copy the only branch from the inlining tree where the deopt happened.
-  SourcePosition position = deopt_position_;
-  int inlining_id = InlinedFunctionInfo::kNoParentId;
-  for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
-    InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
-    if (std::binary_search(current_info.deopt_pc_offsets.begin(),
-                           current_info.deopt_pc_offsets.end(), pc_offset_)) {
-      inlining_id = static_cast<int>(i);
-      break;
+  } else {
+    size_t deopt_position = deopt_position_.raw();
+    // Copy stack of inlined frames where the deopt happened.
+    std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
+    for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
+      info.stack.push_back(CpuProfileDeoptFrame(
+          {inlined_frame.script_id, deopt_position + inlined_frame.position}));
+      deopt_position = 0;  // Done with innermost frame.
     }
   }
-  while (inlining_id != InlinedFunctionInfo::kNoParentId) {
-    InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
-    info.stack.push_back(
-        CpuProfileDeoptFrame({inlined_info.script_id,
-                              inlined_info.start_position + position.raw()}));
-    position = inlined_info.inline_position;
-    inlining_id = inlined_info.parent_id;
-  }
   return info;
 }
 
@@ -229,12 +233,13 @@
   base::OS::Print("\n");
   for (size_t i = 0; i < deopt_infos_.size(); ++i) {
     CpuProfileDeoptInfo& info = deopt_infos_[i];
-    base::OS::Print(
-        "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
-        indent + 10, "", info.stack[0].script_id, info.stack[0].position,
-        info.deopt_reason);
+    base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
+                    " with reason '%s'.\n",
+                    indent + 10, "", info.stack[0].script_id,
+                    info.stack[0].position, info.deopt_reason);
     for (size_t index = 1; index < info.stack.size(); ++index) {
-      base::OS::Print("%*s;;;     Inline point: script_id %d position: %d.\n",
+      base::OS::Print("%*s;;;     Inline point: script_id %d position: %" PRIuS
+                      ".\n",
                       indent + 10, "", info.stack[index].script_id,
                       info.stack[index].position);
     }
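CodeEntry::AddDeoptInlinedFrames above uses an insert-then-swap idiom in place of std::move, per the comment about the MacOS standard library. A self-contained sketch of the same idiom (DeoptFrame is a stand-in type, not a V8 name):

    #include <map>
    #include <utility>
    #include <vector>

    struct DeoptFrame { int script_id; int position; };
    std::map<int, std::vector<DeoptFrame>> frames_by_deopt_id;

    void AddFrames(int deopt_id, std::vector<DeoptFrame>& frames) {
      // Insert an empty vector under the key, then swap the payload in.
      // With a fully C++11 standard library this would simply be:
      //   frames_by_deopt_id[deopt_id] = std::move(frames);
      frames_by_deopt_id
          .insert(std::make_pair(deopt_id, std::vector<DeoptFrame>()))
          .first->second.swap(frames);
    }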
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 194b490..5c017e1 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -47,6 +47,13 @@
                    Address instruction_start = NULL);
   ~CodeEntry();
 
+  // Container describing inlined frames at eager deopt points. It is
+  // eventually translated into v8::CpuProfileDeoptFrame by the profiler.
+  struct DeoptInlinedFrame {
+    int position;
+    int script_id;
+  };
+
   const char* name_prefix() const { return name_prefix_; }
   bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
   const char* name() const { return name_; }
@@ -64,11 +71,11 @@
   const char* bailout_reason() const { return bailout_reason_; }
 
   void set_deopt_info(const char* deopt_reason, SourcePosition position,
-                      size_t pc_offset) {
+                      int deopt_id) {
     DCHECK(deopt_position_.IsUnknown());
     deopt_reason_ = deopt_reason;
     deopt_position_ = position;
-    pc_offset_ = pc_offset;
+    deopt_id_ = deopt_id;
   }
   CpuProfileDeoptInfo GetDeoptInfo();
   const char* deopt_reason() const { return deopt_reason_; }
@@ -81,14 +88,6 @@
 
   void FillFunctionInfo(SharedFunctionInfo* shared);
 
-  void set_inlined_function_infos(
-      const std::vector<InlinedFunctionInfo>& infos) {
-    inlined_function_infos_ = infos;
-  }
-  const std::vector<InlinedFunctionInfo> inlined_function_infos() {
-    return inlined_function_infos_;
-  }
-
   void SetBuiltinId(Builtins::Name id);
   Builtins::Name builtin_id() const {
     return BuiltinIdField::decode(bit_field_);
@@ -102,6 +101,9 @@
   void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
   const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
 
+  void AddDeoptInlinedFrames(int deopt_id, std::vector<DeoptInlinedFrame>&);
+  bool HasDeoptInlinedFramesFor(int deopt_id) const;
+
   Address instruction_start() const { return instruction_start_; }
   Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
 
@@ -125,13 +127,12 @@
   const char* bailout_reason_;
   const char* deopt_reason_;
   SourcePosition deopt_position_;
-  size_t pc_offset_;
+  int deopt_id_;
   JITLineInfoTable* line_info_;
   Address instruction_start_;
   // Should be an unordered_map, but it doesn't currently work on Win & MacOS.
   std::map<int, std::vector<CodeEntry*>> inline_locations_;
-
-  std::vector<InlinedFunctionInfo> inlined_function_infos_;
+  std::map<int, std::vector<DeoptInlinedFrame>> deopt_inlined_frames_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeEntry);
 };
diff --git a/src/profiler/sampler.cc b/src/profiler/sampler.cc
index a340424..ae47dca 100644
--- a/src/profiler/sampler.cc
+++ b/src/profiler/sampler.cc
@@ -42,14 +42,12 @@
 
 #endif
 
+#include "src/base/atomic-utils.h"
 #include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/frames-inl.h"
-#include "src/log.h"
 #include "src/profiler/cpu-profiler-inl.h"
+#include "src/profiler/tick-sample.h"
 #include "src/simulator.h"
 #include "src/v8threads.h"
-#include "src/vm-state-inl.h"
 
 
 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
@@ -174,68 +172,52 @@
 };
 
 
-bool IsSamePage(byte* ptr1, byte* ptr2) {
-  const uint32_t kPageSize = 4096;
-  uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
-  return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
-         (reinterpret_cast<uintptr_t>(ptr2) & mask);
-}
+typedef List<Sampler*> SamplerList;
 
-
-// Check if the code at specified address could potentially be a
-// frame setup code.
-bool IsNoFrameRegion(Address address) {
-  struct Pattern {
-    int bytes_count;
-    byte bytes[8];
-    int offsets[4];
-  };
-  byte* pc = reinterpret_cast<byte*>(address);
-  static Pattern patterns[] = {
-#if V8_HOST_ARCH_IA32
-    // push %ebp
-    // mov %esp,%ebp
-    {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
-    // pop %ebp
-    // ret N
-    {2, {0x5d, 0xc2}, {0, 1, -1}},
-    // pop %ebp
-    // ret
-    {2, {0x5d, 0xc3}, {0, 1, -1}},
-#elif V8_HOST_ARCH_X64
-    // pushq %rbp
-    // movq %rsp,%rbp
-    {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
-    // popq %rbp
-    // ret N
-    {2, {0x5d, 0xc2}, {0, 1, -1}},
-    // popq %rbp
-    // ret
-    {2, {0x5d, 0xc3}, {0, 1, -1}},
-#endif
-    {0, {}, {}}
-  };
-  for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
-    for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
-      int offset = *offset_ptr;
-      if (!offset || IsSamePage(pc, pc - offset)) {
-        MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
-        if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
-          return true;
-      } else {
-        // It is not safe to examine bytes on another page as it might not be
-        // allocated thus causing a SEGFAULT.
-        // Check the pattern part that's on the same page and
-        // pessimistically assume it could be the entire pattern match.
-        MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
-        if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
-          return true;
-      }
-    }
+#if defined(USE_SIGNALS)
+class AtomicGuard {
+ public:
+  explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true)
+      : atomic_(atomic),
+        is_success_(false) {
+    do {
+      // Use Acquire_Load to gain mutual exclusion.
+      USE(atomic_->Value());
+      is_success_ = atomic_->TrySetValue(0, 1);
+    } while (is_block && !is_success_);
   }
-  return false;
+
+  bool is_success() { return is_success_; }
+
+  ~AtomicGuard() {
+    if (is_success_) {
+      atomic_->SetValue(0);
+    }
+    atomic_ = NULL;
+  }
+
+ private:
+  base::AtomicValue<int>* atomic_;
+  bool is_success_;
+};
+
+
+// Returns key for hash map.
+void* ThreadKey(pthread_t thread_id) {
+  return reinterpret_cast<void*>(thread_id);
 }
 
+
+// Returns hash value for hash map.
+uint32_t ThreadHash(pthread_t thread_id) {
+#if V8_OS_MACOSX
+  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
+#else
+  return static_cast<uint32_t>(thread_id);
+#endif
+}
+#endif  // USE_SIGNALS
+
 }  // namespace
 
 #if defined(USE_SIGNALS)
@@ -284,75 +266,6 @@
 #endif
 
 
-#if defined(USE_SIMULATOR)
-class SimulatorHelper {
- public:
-  inline bool Init(Isolate* isolate) {
-    simulator_ = isolate->thread_local_top()->simulator_;
-    // Check if there is active simulator.
-    return simulator_ != NULL;
-  }
-
-  inline void FillRegisters(v8::RegisterState* state) {
-#if V8_TARGET_ARCH_ARM
-    if (!simulator_->has_bad_pc()) {
-      state->pc = reinterpret_cast<Address>(simulator_->get_pc());
-    }
-    state->sp = reinterpret_cast<Address>(simulator_->get_register(
-        Simulator::sp));
-    state->fp = reinterpret_cast<Address>(simulator_->get_register(
-        Simulator::r11));
-#elif V8_TARGET_ARCH_ARM64
-    if (simulator_->sp() == 0 || simulator_->fp() == 0) {
-      // It's possible that the simulator is interrupted while it is updating
-      // the sp or fp register. ARM64 simulator does this in two steps:
-      // first setting it to zero and then setting it to a new value.
-      // Bailout if sp/fp doesn't contain the new value.
-      //
-      // FIXME: The above doesn't really solve the issue.
-      // If a 64-bit target is executed on a 32-bit host even the final
-      // write is non-atomic, so it might obtain a half of the result.
-      // Moreover as long as the register set code uses memcpy (as of now),
-      // it is not guaranteed to be atomic even when both host and target
-      // are of same bitness.
-      return;
-    }
-    state->pc = reinterpret_cast<Address>(simulator_->pc());
-    state->sp = reinterpret_cast<Address>(simulator_->sp());
-    state->fp = reinterpret_cast<Address>(simulator_->fp());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-    if (!simulator_->has_bad_pc()) {
-      state->pc = reinterpret_cast<Address>(simulator_->get_pc());
-    }
-    state->sp = reinterpret_cast<Address>(simulator_->get_register(
-        Simulator::sp));
-    state->fp = reinterpret_cast<Address>(simulator_->get_register(
-        Simulator::fp));
-#elif V8_TARGET_ARCH_PPC
-    if (!simulator_->has_bad_pc()) {
-      state->pc = reinterpret_cast<Address>(simulator_->get_pc());
-    }
-    state->sp =
-        reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
-    state->fp =
-        reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#elif V8_TARGET_ARCH_S390
-    if (!simulator_->has_bad_pc()) {
-      state->pc = reinterpret_cast<Address>(simulator_->get_pc());
-    }
-    state->sp =
-        reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
-    state->fp =
-        reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#endif
-  }
-
- private:
-  Simulator* simulator_;
-};
-#endif  // USE_SIMULATOR
-
-
 #if defined(USE_SIGNALS)
 
 class SignalHandler : public AllStatic {
@@ -374,6 +287,10 @@
     return signal_handler_installed_;
   }
 
+#if !V8_OS_NACL
+  static void CollectSample(void* context, Sampler* sampler);
+#endif
+
  private:
   static void Install() {
 #if !V8_OS_NACL
@@ -418,34 +335,25 @@
 
 // As Native Client does not support signal handling, profiling is disabled.
 #if !V8_OS_NACL
-void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
-                                         void* context) {
-  USE(info);
-  if (signal != SIGPROF) return;
-  Isolate* isolate = Isolate::UnsafeCurrent();
-  if (isolate == NULL || !isolate->IsInUse()) {
-    // We require a fully initialized and entered isolate.
+void SignalHandler::CollectSample(void* context, Sampler* sampler) {
+  if (sampler == NULL || (!sampler->IsProfiling() &&
+                          !sampler->IsRegistered())) {
     return;
   }
+  Isolate* isolate = sampler->isolate();
+
+  // We require a fully initialized and entered isolate.
+  if (isolate == NULL || !isolate->IsInUse()) return;
+
   if (v8::Locker::IsActive() &&
       !isolate->thread_manager()->IsLockedByCurrentThread()) {
     return;
   }
 
-  Sampler* sampler = isolate->logger()->sampler();
-  if (sampler == NULL) return;
-
   v8::RegisterState state;
 
 #if defined(USE_SIMULATOR)
-  SimulatorHelper helper;
-  if (!helper.Init(isolate)) return;
-  helper.FillRegisters(&state);
-  // It possible that the simulator is interrupted while it is updating
-  // the sp or fp register. ARM64 simulator does this in two steps:
-  // first setting it to zero and then setting it to the new value.
-  // Bailout if sp/fp doesn't contain the new value.
-  if (state.sp == 0 || state.fp == 0) return;
+  if (!SimulatorHelper::FillRegisters(isolate, &state)) return;
 #else
   // Extracting the sample from the context is extremely machine dependent.
   ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -582,7 +490,7 @@
 }
 #endif  // V8_OS_NACL
 
-#endif
+#endif  // USE_SIGNALS
 
 
 class SamplerThread : public base::Thread {
@@ -607,19 +515,46 @@
     }
 
     DCHECK(sampler->IsActive());
-    DCHECK(!instance_->active_samplers_.Contains(sampler));
     DCHECK(instance_->interval_ == sampler->interval());
+
+#if defined(USE_SIGNALS)
+    AddSampler(sampler);
+#else
+    DCHECK(!instance_->active_samplers_.Contains(sampler));
     instance_->active_samplers_.Add(sampler);
+#endif  // USE_SIGNALS
 
     if (need_to_start) instance_->StartSynchronously();
   }
 
-  static void RemoveActiveSampler(Sampler* sampler) {
+  static void RemoveSampler(Sampler* sampler) {
     SamplerThread* instance_to_remove = NULL;
     {
       base::LockGuard<base::Mutex> lock_guard(mutex_);
 
-      DCHECK(sampler->IsActive());
+      DCHECK(sampler->IsActive() || sampler->IsRegistered());
+#if defined(USE_SIGNALS)
+      {
+        AtomicGuard atomic_guard(&sampler_list_access_counter_);
+        // Remove sampler from map.
+        pthread_t thread_id = sampler->platform_data()->vm_tid();
+        void* thread_key = ThreadKey(thread_id);
+        uint32_t thread_hash = ThreadHash(thread_id);
+        HashMap::Entry* entry =
+            thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash);
+        DCHECK(entry != NULL);
+        SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+        samplers->RemoveElement(sampler);
+        if (samplers->is_empty()) {
+          thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash);
+          delete samplers;
+        }
+        if (thread_id_to_samplers_.Get().occupancy() == 0) {
+          instance_to_remove = instance_;
+          instance_ = NULL;
+        }
+      }
+#else
       bool removed = instance_->active_samplers_.RemoveElement(sampler);
       DCHECK(removed);
       USE(removed);
@@ -630,6 +565,7 @@
         instance_to_remove = instance_;
         instance_ = NULL;
       }
+#endif  // USE_SIGNALS
     }
 
     if (!instance_to_remove) return;
@@ -637,11 +573,36 @@
     delete instance_to_remove;
   }
 
+  // Unlike AddActiveSampler, this method only adds a sampler,
+  // but won't start the sampler thread.
+  static void RegisterSampler(Sampler* sampler) {
+    base::LockGuard<base::Mutex> lock_guard(mutex_);
+#if defined(USE_SIGNALS)
+    AddSampler(sampler);
+#endif  // USE_SIGNALS
+  }
+
   // Implement Thread::Run().
   virtual void Run() {
     while (true) {
       {
         base::LockGuard<base::Mutex> lock_guard(mutex_);
+#if defined(USE_SIGNALS)
+        if (thread_id_to_samplers_.Get().occupancy() == 0) break;
+        if (SignalHandler::Installed()) {
+          for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start();
+               p != NULL; p = thread_id_to_samplers_.Get().Next(p)) {
+#if V8_OS_AIX && V8_TARGET_ARCH_PPC64
+            // On AIX64, (void*) cannot be cast to pthread_t, which is
+            // of type unsigned int (4 bytes).
+            pthread_t thread_id = reinterpret_cast<intptr_t>(p->key);
+#else
+            pthread_t thread_id = reinterpret_cast<pthread_t>(p->key);
+#endif
+            pthread_kill(thread_id, SIGPROF);
+          }
+        }
+#else
         if (active_samplers_.is_empty()) break;
         // When CPU profiling is enabled both JavaScript and C++ code is
         // profiled. We must not suspend.
@@ -650,6 +611,7 @@
           if (!sampler->IsProfiling()) continue;
           sampler->DoSample();
         }
+#endif  // USE_SIGNALS
       }
       base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
     }
@@ -661,7 +623,38 @@
   static SamplerThread* instance_;
 
   const int interval_;
-  List<Sampler*> active_samplers_;
+
+#if defined(USE_SIGNALS)
+  struct HashMapCreateTrait {
+    static void Construct(HashMap* allocated_ptr) {
+      new (allocated_ptr) HashMap(HashMap::PointersMatch);
+    }
+  };
+  friend class SignalHandler;
+  static base::LazyInstance<HashMap, HashMapCreateTrait>::type
+      thread_id_to_samplers_;
+  static base::AtomicValue<int> sampler_list_access_counter_;
+  static void AddSampler(Sampler* sampler) {
+    AtomicGuard atomic_guard(&sampler_list_access_counter_);
+    // Add sampler into map if needed.
+    pthread_t thread_id = sampler->platform_data()->vm_tid();
+    HashMap::Entry *entry =
+        thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
+                                                         ThreadHash(thread_id));
+    if (entry->value == NULL) {
+      SamplerList* samplers = new SamplerList();
+      samplers->Add(sampler);
+      entry->value = samplers;
+    } else {
+      SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+      if (!samplers->Contains(sampler)) {
+        samplers->Add(sampler);
+      }
+    }
+  }
+#else
+  SamplerList active_samplers_;
+#endif  // USE_SIGNALS
 
   DISALLOW_COPY_AND_ASSIGN(SamplerThread);
 };
@@ -669,103 +662,33 @@
 
 base::Mutex* SamplerThread::mutex_ = NULL;
 SamplerThread* SamplerThread::instance_ = NULL;
+#if defined(USE_SIGNALS)
+base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type
+    SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
+base::AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);
 
-
-//
-// StackTracer implementation
-//
-DISABLE_ASAN void TickSample::Init(Isolate* isolate,
-                                   const v8::RegisterState& regs,
-                                   RecordCEntryFrame record_c_entry_frame,
-                                   bool update_stats) {
-  timestamp = base::TimeTicks::HighResolutionNow();
-  pc = reinterpret_cast<Address>(regs.pc);
-  state = isolate->current_vm_state();
-  this->update_stats = update_stats;
-
-  // Avoid collecting traces while doing GC.
-  if (state == GC) return;
-
-  Address js_entry_sp = isolate->js_entry_sp();
-  if (js_entry_sp == 0) return;  // Not executing JS now.
-
-  if (pc && IsNoFrameRegion(pc)) {
-    // Can't collect stack. Mark the sample as spoiled.
-    timestamp = base::TimeTicks();
-    pc = 0;
+// As Native Client does not support signal handling, profiling is disabled.
+#if !V8_OS_NACL
+void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
+                                         void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false);
+  if (!atomic_guard.is_success()) return;
+  pthread_t thread_id = pthread_self();
+  HashMap::Entry* entry =
+      SamplerThread::thread_id_to_samplers_.Pointer()->Lookup(
+          ThreadKey(thread_id), ThreadHash(thread_id));
+  if (entry == NULL)
     return;
-  }
-
-  ExternalCallbackScope* scope = isolate->external_callback_scope();
-  Address handler = Isolate::handler(isolate->thread_local_top());
-  // If there is a handler on top of the external callback scope then
-  // we have already entrered JavaScript again and the external callback
-  // is not the top function.
-  if (scope && scope->scope_address() < handler) {
-    external_callback_entry = *scope->callback_entrypoint_address();
-    has_external_callback = true;
-  } else {
-    // sp register may point at an arbitrary place in memory, make
-    // sure MSAN doesn't complain about it.
-    MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
-    // Sample potential return address value for frameless invocation of
-    // stubs (we'll figure out later, if this value makes sense).
-    tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
-    has_external_callback = false;
-  }
-
-  SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
-                            reinterpret_cast<Address>(regs.sp), js_entry_sp);
-  top_frame_type = it.top_frame_type();
-
-  SampleInfo info;
-  GetStackSample(isolate, regs, record_c_entry_frame,
-                 reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
-  frames_count = static_cast<unsigned>(info.frames_count);
-  if (!frames_count) {
-    // It is executing JS but failed to collect a stack trace.
-    // Mark the sample as spoiled.
-    timestamp = base::TimeTicks();
-    pc = 0;
+  SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+  for (int i = 0; i < samplers->length(); ++i) {
+    Sampler* sampler = samplers->at(i);
+    CollectSample(context, sampler);
   }
 }
-
-
-void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
-                                RecordCEntryFrame record_c_entry_frame,
-                                void** frames, size_t frames_limit,
-                                v8::SampleInfo* sample_info) {
-  sample_info->frames_count = 0;
-  sample_info->vm_state = isolate->current_vm_state();
-  if (sample_info->vm_state == GC) return;
-
-  Address js_entry_sp = isolate->js_entry_sp();
-  if (js_entry_sp == 0) return;  // Not executing JS now.
-
-  SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
-                            reinterpret_cast<Address>(regs.sp), js_entry_sp);
-  size_t i = 0;
-  if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
-      it.top_frame_type() == StackFrame::EXIT) {
-    frames[i++] = isolate->c_function();
-  }
-  while (!it.done() && i < frames_limit) {
-    if (it.frame()->is_interpreted()) {
-      // For interpreted frames use the bytecode array pointer as the pc.
-      InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
-      // Since the sampler can interrupt execution at any point the
-      // bytecode_array might be garbage, so don't dereference it.
-      Address bytecode_array =
-          reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
-      frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
-                    frame->GetBytecodeOffset();
-    } else {
-      frames[i++] = it.frame()->pc();
-    }
-    it.Advance();
-  }
-  sample_info->frames_count = i;
-}
+#endif  // !V8_OS_NACL
+#endif  // USE_SIGNALS
 
 
 void Sampler::SetUp() {
@@ -789,6 +712,7 @@
       profiling_(false),
       has_processing_thread_(false),
       active_(false),
+      registered_(false),
       is_counting_samples_(false),
       js_sample_count_(0),
       external_sample_count_(0) {
@@ -797,6 +721,9 @@
 
 Sampler::~Sampler() {
   DCHECK(!IsActive());
+  if (IsRegistered()) {
+    SamplerThread::RemoveSampler(this);
+  }
   delete data_;
 }
 
@@ -809,8 +736,9 @@
 
 void Sampler::Stop() {
   DCHECK(IsActive());
-  SamplerThread::RemoveActiveSampler(this);
+  SamplerThread::RemoveSampler(this);
   SetActive(false);
+  SetRegistered(false);
 }
 
 
@@ -850,6 +778,10 @@
 
 void Sampler::DoSample() {
   if (!SignalHandler::Installed()) return;
+  if (!IsActive() && !IsRegistered()) {
+    SamplerThread::RegisterSampler(this);
+    SetRegistered(true);
+  }
   pthread_kill(platform_data()->vm_tid(), SIGPROF);
 }
 
@@ -859,11 +791,6 @@
   HANDLE profiled_thread = platform_data()->profiled_thread();
   if (profiled_thread == NULL) return;
 
-#if defined(USE_SIMULATOR)
-  SimulatorHelper helper;
-  if (!helper.Init(isolate())) return;
-#endif
-
   const DWORD kSuspendFailed = static_cast<DWORD>(-1);
   if (SuspendThread(profiled_thread) == kSuspendFailed) return;
 
@@ -874,7 +801,10 @@
   if (GetThreadContext(profiled_thread, &context) != 0) {
     v8::RegisterState state;
 #if defined(USE_SIMULATOR)
-    helper.FillRegisters(&state);
+    if (!SimulatorHelper::FillRegisters(isolate(), &state)) {
+      ResumeThread(profiled_thread);
+      return;
+    }
 #else
 #if V8_HOST_ARCH_X64
     state.pc = reinterpret_cast<Address>(context.Rip);
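The AtomicGuard introduced above is a spinlock built from a compare-and-swap; HandleProfilerSignal constructs it in non-blocking mode (is_block = false) so a signal that interrupts the lock holder on the same thread drops the sample instead of deadlocking. A behavioral sketch using std::atomic rather than V8's base::AtomicValue:

    #include <atomic>

    class SpinGuard {
     public:
      explicit SpinGuard(std::atomic<int>* lock, bool block = true)
          : lock_(lock), acquired_(false) {
        do {
          int expected = 0;  // 0 == unlocked, 1 == locked
          acquired_ = lock_->compare_exchange_strong(expected, 1);
        } while (block && !acquired_);
      }
      ~SpinGuard() {
        if (acquired_) lock_->store(0);  // release only if we own the lock
      }
      bool acquired() const { return acquired_; }

     private:
      std::atomic<int>* lock_;
      bool acquired_;
    };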
diff --git a/src/profiler/sampler.h b/src/profiler/sampler.h
index dcd1255..3d3a6e9 100644
--- a/src/profiler/sampler.h
+++ b/src/profiler/sampler.h
@@ -8,14 +8,13 @@
 #include "include/v8.h"
 
 #include "src/base/atomicops.h"
-#include "src/base/platform/time.h"
-#include "src/frames.h"
-#include "src/globals.h"
+#include "src/base/macros.h"
 
 namespace v8 {
 namespace internal {
 
 class Isolate;
+struct TickSample;
 
 // ----------------------------------------------------------------------------
 // Sampler
@@ -24,43 +23,6 @@
 // (if used for profiling) the program counter and stack pointer for
 // the thread that created it.
 
-// TickSample captures the information collected for each sample.
-struct TickSample {
-  // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
-  // include the runtime function we're calling. Externally exposed tick
-  // samples don't care.
-  enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
-
-  TickSample()
-      : state(OTHER),
-        pc(NULL),
-        external_callback_entry(NULL),
-        frames_count(0),
-        has_external_callback(false),
-        update_stats(true),
-        top_frame_type(StackFrame::NONE) {}
-  void Init(Isolate* isolate, const v8::RegisterState& state,
-            RecordCEntryFrame record_c_entry_frame, bool update_stats);
-  static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
-                             RecordCEntryFrame record_c_entry_frame,
-                             void** frames, size_t frames_limit,
-                             v8::SampleInfo* sample_info);
-  StateTag state;  // The state of the VM.
-  Address pc;      // Instruction pointer.
-  union {
-    Address tos;   // Top stack value (*sp).
-    Address external_callback_entry;
-  };
-  static const unsigned kMaxFramesCountLog2 = 8;
-  static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
-  Address stack[kMaxFramesCount];  // Call stack.
-  base::TimeTicks timestamp;
-  unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
-  bool has_external_callback : 1;
-  bool update_stats : 1;  // Whether the sample should update aggregated stats.
-  StackFrame::Type top_frame_type : 5;
-};
-
 class Sampler {
  public:
   // Initializes the Sampler support. Called once at VM startup.
@@ -92,6 +54,11 @@
   // Whether the sampler is running (that is, consumes resources).
   bool IsActive() const { return base::NoBarrier_Load(&active_); }
 
+  // CpuProfiler collects samples by calling DoSample directly
+  // without calling Start. To keep it working, we register the sampler
+  // with the SamplerThread.
+  bool IsRegistered() const { return base::NoBarrier_Load(&registered_); }
+
   void DoSample();
   // If true next sample must be initiated on the profiler event processor
   // thread right after latest sample is processed.
@@ -119,11 +86,14 @@
  private:
   void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
 
+  void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
+
   Isolate* isolate_;
   const int interval_;
   base::Atomic32 profiling_;
   base::Atomic32 has_processing_thread_;
   base::Atomic32 active_;
+  base::Atomic32 registered_;
   PlatformData* data_;  // Platform specific data.
   // Counts stack samples taken in various VM states.
   bool is_counting_samples_;
@@ -132,7 +102,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
 };
 
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
index a32cae3..db9214d 100644
--- a/src/profiler/sampling-heap-profiler.cc
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -47,8 +47,9 @@
   return {size, static_cast<unsigned int>(count * scale + 0.5)};
 }
 
-SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
-                                           uint64_t rate, int stack_depth)
+SamplingHeapProfiler::SamplingHeapProfiler(
+    Heap* heap, StringsStorage* names, uint64_t rate, int stack_depth,
+    v8::HeapProfiler::SamplingFlags flags)
     : isolate_(heap->isolate()),
       heap_(heap),
       new_space_observer_(new SamplingAllocationObserver(
@@ -58,14 +59,15 @@
           heap_, static_cast<intptr_t>(rate), rate, this,
           heap->isolate()->random_number_generator())),
       names_(names),
-      profile_root_("(root)", v8::UnboundScript::kNoScriptId, 0),
+      profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0),
       samples_(),
       stack_depth_(stack_depth),
-      rate_(rate) {
+      rate_(rate),
+      flags_(flags) {
   CHECK_GT(rate_, 0);
   heap->new_space()->AddAllocationObserver(new_space_observer_.get());
   AllSpaces spaces(heap);
-  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+  for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
     if (space != heap->new_space()) {
       space->AddAllocationObserver(other_spaces_observer_.get());
     }
@@ -76,7 +78,7 @@
 SamplingHeapProfiler::~SamplingHeapProfiler() {
   heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
   AllSpaces spaces(heap_);
-  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+  for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
     if (space != heap_->new_space()) {
       space->RemoveAllocationObserver(other_spaces_observer_.get());
     }
@@ -109,6 +111,7 @@
   Sample* sample = new Sample(size, node, loc, this);
   samples_.insert(sample);
   sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+  sample->global.MarkIndependent();
 }
 
 void SamplingHeapProfiler::OnWeakCallback(
@@ -117,22 +120,34 @@
   AllocationNode* node = sample->owner;
   DCHECK(node->allocations_[sample->size] > 0);
   node->allocations_[sample->size]--;
+  if (node->allocations_[sample->size] == 0) {
+    node->allocations_.erase(sample->size);
+    while (node->allocations_.empty() && node->children_.empty() &&
+           node->parent_ && !node->parent_->pinned_) {
+      AllocationNode* parent = node->parent_;
+      AllocationNode::FunctionId id = AllocationNode::function_id(
+          node->script_id_, node->script_position_, node->name_);
+      parent->children_.erase(id);
+      delete node;
+      node = parent;
+    }
+  }
   sample->profiler->samples_.erase(sample);
   delete sample;
 }
 
-SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
-    AllocationNode* parent, const char* name, int script_id,
-    int start_position) {
-  for (AllocationNode* child : parent->children_) {
-    if (child->script_id_ == script_id &&
-        child->script_position_ == start_position &&
-        strcmp(child->name_, name) == 0) {
-      return child;
-    }
+SamplingHeapProfiler::AllocationNode*
+SamplingHeapProfiler::AllocationNode::FindOrAddChildNode(const char* name,
+                                                         int script_id,
+                                                         int start_position) {
+  FunctionId id = function_id(script_id, start_position, name);
+  auto it = children_.find(id);
+  if (it != children_.end()) {
+    DCHECK(strcmp(it->second->name_, name) == 0);
+    return it->second;
   }
-  AllocationNode* child = new AllocationNode(name, script_id, start_position);
-  parent->children_.push_back(child);
+  auto child = new AllocationNode(this, name, script_id, start_position);
+  children_.insert(std::make_pair(id, child));
   return child;
 }
 
@@ -140,7 +155,7 @@
   AllocationNode* node = &profile_root_;
 
   std::vector<SharedFunctionInfo*> stack;
-  StackTraceFrameIterator it(isolate_);
+  JavaScriptFrameIterator it(isolate_);
   int frames_captured = 0;
   while (!it.done() && frames_captured < stack_depth_) {
     JavaScriptFrame* frame = it.frame();
@@ -173,7 +188,7 @@
         name = "(JS)";
         break;
     }
-    return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
+    return node->FindOrAddChildNode(name, v8::UnboundScript::kNoScriptId, 0);
   }
 
   // We need to process the stack in reverse order as the top of the stack is
@@ -186,14 +201,17 @@
       Script* script = Script::cast(shared->script());
       script_id = script->id();
     }
-    node = FindOrAddChildNode(node, name, script_id, shared->start_position());
+    node = node->FindOrAddChildNode(name, script_id, shared->start_position());
   }
   return node;
 }
 
 v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
     AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
-    const std::map<int, Script*>& scripts) {
+    const std::map<int, Handle<Script>>& scripts) {
+  // By pinning the node we make sure its children won't get disposed if
+  // a GC kicks in during the tree retrieval.
+  node->pinned_ = true;
   Local<v8::String> script_name =
       ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(""));
   int line = v8::AllocationProfile::kNoLineNumberInfo;
@@ -203,22 +221,21 @@
   if (node->script_id_ != v8::UnboundScript::kNoScriptId &&
       scripts.find(node->script_id_) != scripts.end()) {
     // Cannot use std::map<T>::at because it is not available on android.
-    auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
-    Script* script = non_const_scripts[node->script_id_];
-    if (script) {
+    auto non_const_scripts =
+        const_cast<std::map<int, Handle<Script>>&>(scripts);
+    Handle<Script> script = non_const_scripts[node->script_id_];
+    if (!script.is_null()) {
       if (script->name()->IsName()) {
         Name* name = Name::cast(script->name());
         script_name = ToApiHandle<v8::String>(
             isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
       }
-      Handle<Script> script_handle(script);
-      line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
-      column =
-          1 + Script::GetColumnNumber(script_handle, node->script_position_);
+      line = 1 + Script::GetLineNumber(script, node->script_position_);
+      column = 1 + Script::GetColumnNumber(script, node->script_position_);
     }
-    for (auto alloc : node->allocations_) {
-      allocations.push_back(ScaleSample(alloc.first, alloc.second));
-    }
+  }
+  for (auto alloc : node->allocations_) {
+    allocations.push_back(ScaleSample(alloc.first, alloc.second));
   }
 
   profile->nodes().push_back(v8::AllocationProfile::Node(
@@ -227,35 +244,34 @@
        script_name, node->script_id_, node->script_position_, line, column,
        std::vector<v8::AllocationProfile::Node*>(), allocations}));
   v8::AllocationProfile::Node* current = &profile->nodes().back();
-  size_t child_len = node->children_.size();
-  // The children vector may have nodes appended to it during translation
+  // The children map may have nodes inserted into it during translation
   // because the translation may allocate strings on the JS heap that have
-  // the potential to be sampled. We cache the length of the vector before
-  // iteration so that nodes appended to the vector during iteration are
-  // not processed.
-  for (size_t i = 0; i < child_len; i++) {
+  // the potential to be sampled. That's ok since map iterators are not
+  // invalidated upon std::map insertion.
+  for (auto it : node->children_) {
     current->children.push_back(
-        TranslateAllocationNode(profile, node->children_[i], scripts));
+        TranslateAllocationNode(profile, it.second, scripts));
   }
+  node->pinned_ = false;
   return current;
 }
 
 v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
+  if (flags_ & v8::HeapProfiler::kSamplingForceGC) {
+    isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                                        "SamplingHeapProfiler");
+  }
   // To resolve positions to line/column numbers, we will need to look up
   // scripts. Build a map to allow fast mapping from script id to script.
-  std::map<int, Script*> scripts;
+  std::map<int, Handle<Script>> scripts;
   {
     Script::Iterator iterator(isolate_);
-    Script* script;
-    while ((script = iterator.Next())) {
-      scripts[script->id()] = script;
+    while (Script* script = iterator.Next()) {
+      scripts[script->id()] = handle(script);
     }
   }
-
   auto profile = new v8::internal::AllocationProfile();
-
   TranslateAllocationNode(profile, &profile_root_, scripts);
-
   return profile;
 }
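The weak-callback path above prunes dead branches bottom-up: once a node holds no samples and no children, it is unlinked from its parent, and the walk continues upward until it reaches a non-empty node or one whose parent is pinned by TranslateAllocationNode. A standalone sketch of that loop (Node is a stand-in for AllocationNode):

    #include <cstddef>
    #include <cstdint>
    #include <map>

    struct Node {
      std::map<std::size_t, unsigned> allocations;  // size -> sample count
      std::map<std::uint64_t, Node*> children;      // function id -> child
      Node* parent = nullptr;
      std::uint64_t id_in_parent = 0;
      bool pinned = false;  // set while the profile tree is being read
    };

    void PruneUpward(Node* node) {
      while (node->allocations.empty() && node->children.empty() &&
             node->parent != nullptr && !node->parent->pinned) {
        Node* parent = node->parent;
        parent->children.erase(node->id_in_parent);
        delete node;
        node = parent;  // the parent may now be empty too
      }
    }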
 
diff --git a/src/profiler/sampling-heap-profiler.h b/src/profiler/sampling-heap-profiler.h
index 0b538b0..4b7c366 100644
--- a/src/profiler/sampling-heap-profiler.h
+++ b/src/profiler/sampling-heap-profiler.h
@@ -41,7 +41,7 @@
 class SamplingHeapProfiler {
  public:
   SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
-                       int stack_depth);
+                       int stack_depth, v8::HeapProfiler::SamplingFlags flags);
   ~SamplingHeapProfiler();
 
   v8::AllocationProfile* GetAllocationProfile();
@@ -71,23 +71,47 @@
 
   class AllocationNode {
    public:
-    AllocationNode(const char* const name, int script_id,
-                   const int start_position)
-        : script_id_(script_id),
+    AllocationNode(AllocationNode* parent, const char* name, int script_id,
+                   int start_position)
+        : parent_(parent),
+          script_id_(script_id),
           script_position_(start_position),
-          name_(name) {}
+          name_(name),
+          pinned_(false) {}
     ~AllocationNode() {
       for (auto child : children_) {
-        delete child;
+        delete child.second;
       }
     }
 
    private:
+    typedef uint64_t FunctionId;
+    static FunctionId function_id(int script_id, int start_position,
+                                  const char* name) {
+      // script_id == kNoScriptId case:
+      //   Use function name pointer as an id. Names derived from VM state
+      //   must not collide with the builtin names. The least significant bit
+      //   of the id is set to 1.
+      if (script_id == v8::UnboundScript::kNoScriptId) {
+        return reinterpret_cast<intptr_t>(name) | 1;
+      }
+      // script_id != kNoScriptId case:
+      //   Use the (script_id, start_position) pair to uniquely identify the node.
+      //   The least significant bit of the id is set to 0.
+      DCHECK(static_cast<unsigned>(start_position) < (1u << 31));
+      return (static_cast<uint64_t>(script_id) << 32) + (start_position << 1);
+    }
+    AllocationNode* FindOrAddChildNode(const char* name, int script_id,
+                                       int start_position);
+    // TODO(alph): make use of unordered_maps here. Pay attention to
+    // iterator invalidation during TranslateAllocationNode.
     std::map<size_t, unsigned int> allocations_;
-    std::vector<AllocationNode*> children_;
+    std::map<FunctionId, AllocationNode*> children_;
+    AllocationNode* const parent_;
     const int script_id_;
     const int script_position_;
     const char* const name_;
+    bool pinned_;
 
     friend class SamplingHeapProfiler;
 
@@ -110,12 +134,10 @@
   // loaded scripts keyed by their script id.
   v8::AllocationProfile::Node* TranslateAllocationNode(
       AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
-      const std::map<int, Script*>& scripts);
+      const std::map<int, Handle<Script>>& scripts);
   v8::AllocationProfile::Allocation ScaleSample(size_t size,
                                                 unsigned int count);
   AllocationNode* AddStack();
-  AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
-                                     int script_id, int start_position);
 
   Isolate* const isolate_;
   Heap* const heap_;
@@ -126,6 +148,7 @@
   std::set<Sample*> samples_;
   const int stack_depth_;
   const uint64_t rate_;
+  v8::HeapProfiler::SamplingFlags flags_;
 
   friend class SamplingAllocationObserver;
 };
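A worked example of the FunctionId encoding defined above: positional ids keep bit 0 clear, while ids derived from a name pointer set bit 0, so the two kinds can never collide (the values here are illustrative):

    #include <cstdint>
    #include <cstdio>

    std::uint64_t PositionalId(int script_id, int start_position) {
      return (static_cast<std::uint64_t>(script_id) << 32) +
             (static_cast<std::uint64_t>(start_position) << 1);  // bit 0 == 0
    }

    std::uint64_t NameId(const char* name) {
      return reinterpret_cast<std::uintptr_t>(name) | 1;  // bit 0 == 1
    }

    int main() {
      // (7 << 32) + (42 << 1) == 0x700000054; prints "700000054".
      std::printf("%llx\n",
                  static_cast<unsigned long long>(PositionalId(7, 42)));
      return 0;
    }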
diff --git a/src/profiler/strings-storage.h b/src/profiler/strings-storage.h
index 7164cae..0849d63 100644
--- a/src/profiler/strings-storage.h
+++ b/src/profiler/strings-storage.h
@@ -6,6 +6,7 @@
 #define V8_PROFILER_STRINGS_STORAGE_H_
 
 #include "src/allocation.h"
+#include "src/base/compiler-specific.h"
 #include "src/hashmap.h"
 
 namespace v8 {
@@ -19,7 +20,8 @@
   ~StringsStorage();
 
   const char* GetCopy(const char* src);
-  const char* GetFormatted(const char* format, ...);
+  PRINTF_FORMAT(2, 3) const char* GetFormatted(const char* format, ...);
+  PRINTF_FORMAT(2, 0)
   const char* GetVFormatted(const char* format, va_list args);
   const char* GetName(Name* name);
   const char* GetName(int index);
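PRINTF_FORMAT maps to the GCC/Clang format attribute, so the compiler type-checks the variadic arguments against the format string; the (2, 0) form on GetVFormatted marks a va_list variant (0 means there are no variadic arguments to check). A simplified sketch of the macro and its effect (the real definition lives in src/base/compiler-specific.h):

    #if defined(__GNUC__)
    #define PRINTF_FORMAT(format_param, dots_param) \
      __attribute__((format(printf, format_param, dots_param)))
    #else
    #define PRINTF_FORMAT(format_param, dots_param)
    #endif

    // The compiler now checks call sites against the format string:
    PRINTF_FORMAT(1, 2) void Log(const char* fmt, ...);
    // Log("%s took %d ms", name);      // warning: too few arguments
    // Log("%s took %d ms", name, 5.0); // warning: %d given a double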
diff --git a/src/profiler/tick-sample.cc b/src/profiler/tick-sample.cc
new file mode 100644
index 0000000..3edd964
--- /dev/null
+++ b/src/profiler/tick-sample.cc
@@ -0,0 +1,233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/tick-sample.h"
+
+#include "src/frames-inl.h"
+#include "src/vm-state-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool IsSamePage(byte* ptr1, byte* ptr2) {
+  const uint32_t kPageSize = 4096;
+  uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
+  return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
+         (reinterpret_cast<uintptr_t>(ptr2) & mask);
+}
+
+
+// Check if the code at the specified address could potentially be
+// frame setup code.
+bool IsNoFrameRegion(Address address) {
+  struct Pattern {
+    int bytes_count;
+    byte bytes[8];
+    int offsets[4];
+  };
+  byte* pc = reinterpret_cast<byte*>(address);
+  static Pattern patterns[] = {
+#if V8_HOST_ARCH_IA32
+    // push %ebp
+    // mov %esp,%ebp
+    {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
+    // pop %ebp
+    // ret N
+    {2, {0x5d, 0xc2}, {0, 1, -1}},
+    // pop %ebp
+    // ret
+    {2, {0x5d, 0xc3}, {0, 1, -1}},
+#elif V8_HOST_ARCH_X64
+    // pushq %rbp
+    // movq %rsp,%rbp
+    {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
+    // popq %rbp
+    // ret N
+    {2, {0x5d, 0xc2}, {0, 1, -1}},
+    // popq %rbp
+    // ret
+    {2, {0x5d, 0xc3}, {0, 1, -1}},
+#endif
+    {0, {}, {}}
+  };
+  for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
+    for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
+      int offset = *offset_ptr;
+      if (!offset || IsSamePage(pc, pc - offset)) {
+        MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
+        if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
+          return true;
+      } else {
+        // It is not safe to examine bytes on another page, as it might not
+        // be allocated, thus causing a SEGFAULT.
+        // Check the pattern part that's on the same page and
+        // pessimistically assume it could be the entire pattern match.
+        MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
+        if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
+          return true;
+      }
+    }
+  }
+  return false;
+}
+
+}  // namespace
+
+
+//
+// StackTracer implementation
+//
+DISABLE_ASAN void TickSample::Init(Isolate* isolate,
+                                   const v8::RegisterState& regs,
+                                   RecordCEntryFrame record_c_entry_frame,
+                                   bool update_stats) {
+  timestamp = base::TimeTicks::HighResolutionNow();
+  pc = reinterpret_cast<Address>(regs.pc);
+  state = isolate->current_vm_state();
+  this->update_stats = update_stats;
+
+  // Avoid collecting traces while doing GC.
+  if (state == GC) return;
+
+  Address js_entry_sp = isolate->js_entry_sp();
+  if (js_entry_sp == 0) return;  // Not executing JS now.
+
+  if (pc && IsNoFrameRegion(pc)) {
+    // Can't collect stack. Mark the sample as spoiled.
+    timestamp = base::TimeTicks();
+    pc = 0;
+    return;
+  }
+
+  ExternalCallbackScope* scope = isolate->external_callback_scope();
+  Address handler = Isolate::handler(isolate->thread_local_top());
+  // If there is a handler on top of the external callback scope then
+  // we have already entered JavaScript again and the external callback
+  // is not the top function.
+  if (scope && scope->scope_address() < handler) {
+    external_callback_entry = *scope->callback_entrypoint_address();
+    has_external_callback = true;
+  } else {
+    // sp register may point at an arbitrary place in memory, make
+    // sure MSAN doesn't complain about it.
+    MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
+    // Sample potential return address value for frameless invocation of
+    // stubs (we'll figure out later if this value makes sense).
+    tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
+    has_external_callback = false;
+  }
+
+  SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
+                            reinterpret_cast<Address>(regs.sp), js_entry_sp);
+  top_frame_type = it.top_frame_type();
+
+  SampleInfo info;
+  GetStackSample(isolate, regs, record_c_entry_frame,
+                 reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
+  frames_count = static_cast<unsigned>(info.frames_count);
+  if (!frames_count) {
+    // It is executing JS but failed to collect a stack trace.
+    // Mark the sample as spoiled.
+    timestamp = base::TimeTicks();
+    pc = 0;
+  }
+}
+
+
+void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
+                                RecordCEntryFrame record_c_entry_frame,
+                                void** frames, size_t frames_limit,
+                                v8::SampleInfo* sample_info) {
+  sample_info->frames_count = 0;
+  sample_info->vm_state = isolate->current_vm_state();
+  if (sample_info->vm_state == GC) return;
+
+  Address js_entry_sp = isolate->js_entry_sp();
+  if (js_entry_sp == 0) return;  // Not executing JS now.
+
+  SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
+                            reinterpret_cast<Address>(regs.sp), js_entry_sp);
+  size_t i = 0;
+  if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
+      it.top_frame_type() == StackFrame::EXIT) {
+    frames[i++] = isolate->c_function();
+  }
+  while (!it.done() && i < frames_limit) {
+    if (it.frame()->is_interpreted()) {
+      // For interpreted frames use the bytecode array pointer as the pc.
+      InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
+      // Since the sampler can interrupt execution at any point the
+      // bytecode_array might be garbage, so don't dereference it.
+      Address bytecode_array =
+          reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
+      frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
+                    frame->GetBytecodeOffset();
+    } else {
+      frames[i++] = it.frame()->pc();
+    }
+    it.Advance();
+  }
+  sample_info->frames_count = i;
+}
+
+
+#if defined(USE_SIMULATOR)
+bool SimulatorHelper::FillRegisters(Isolate* isolate,
+                                    v8::RegisterState* state) {
+  Simulator *simulator = isolate->thread_local_top()->simulator_;
+  // Check if there is an active simulator.
+  if (simulator == NULL) return false;
+#if V8_TARGET_ARCH_ARM
+  if (!simulator->has_bad_pc()) {
+    state->pc = reinterpret_cast<Address>(simulator->get_pc());
+  }
+  state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+  state->fp = reinterpret_cast<Address>(simulator->get_register(
+      Simulator::r11));
+#elif V8_TARGET_ARCH_ARM64
+  state->pc = reinterpret_cast<Address>(simulator->pc());
+  state->sp = reinterpret_cast<Address>(simulator->sp());
+  state->fp = reinterpret_cast<Address>(simulator->fp());
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+  if (!simulator->has_bad_pc()) {
+    state->pc = reinterpret_cast<Address>(simulator->get_pc());
+  }
+  state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+  state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_PPC
+  if (!simulator->has_bad_pc()) {
+    state->pc = reinterpret_cast<Address>(simulator->get_pc());
+  }
+  state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+  state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_S390
+  if (!simulator->has_bad_pc()) {
+    state->pc = reinterpret_cast<Address>(simulator->get_pc());
+  }
+  state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+  state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#endif
+  if (state->sp == 0 || state->fp == 0) {
+    // It is possible that the simulator is interrupted while it is updating
+    // the sp or fp register. The ARM64 simulator does this in two steps:
+    // first setting it to zero and then setting it to the new value.
+    // Bail out if sp/fp doesn't contain the new value.
+    //
+    // FIXME: The above doesn't really solve the issue.
+    // If a 64-bit target is executed on a 32-bit host even the final
+    // write is non-atomic, so it might obtain a half of the result.
+    // Moreover, as long as the register set code uses memcpy (as of now),
+    // it is not guaranteed to be atomic even when both host and target
+    // are of same bitness.
+    return false;
+  }
+  return true;
+}
+#endif  // USE_SIMULATOR
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/profiler/tick-sample.h b/src/profiler/tick-sample.h
new file mode 100644
index 0000000..fa2cf21
--- /dev/null
+++ b/src/profiler/tick-sample.h
@@ -0,0 +1,76 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_TICK_SAMPLE_H_
+#define V8_PROFILER_TICK_SAMPLE_H_
+
+#include "include/v8.h"
+
+#include "src/base/platform/time.h"
+#include "src/frames.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// ----------------------------------------------------------------------------
+// Sampler
+//
+// A sampler periodically samples the state of the VM and optionally
+// (if used for profiling) the program counter and stack pointer for
+// the thread that created it.
+
+// TickSample captures the information collected for each sample.
+struct TickSample {
+  // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
+  // include the runtime function we're calling. Externally exposed tick
+  // samples don't care.
+  enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
+
+  TickSample()
+      : state(OTHER),
+        pc(NULL),
+        external_callback_entry(NULL),
+        frames_count(0),
+        has_external_callback(false),
+        update_stats(true),
+        top_frame_type(StackFrame::NONE) {}
+  void Init(Isolate* isolate, const v8::RegisterState& state,
+            RecordCEntryFrame record_c_entry_frame, bool update_stats);
+  static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
+                             RecordCEntryFrame record_c_entry_frame,
+                             void** frames, size_t frames_limit,
+                             v8::SampleInfo* sample_info);
+  StateTag state;  // The state of the VM.
+  Address pc;      // Instruction pointer.
+  union {
+    Address tos;   // Top stack value (*sp).
+    Address external_callback_entry;
+  };
+  static const unsigned kMaxFramesCountLog2 = 8;
+  static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
+  Address stack[kMaxFramesCount];  // Call stack.
+  base::TimeTicks timestamp;
+  unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
+  bool has_external_callback : 1;
+  bool update_stats : 1;  // Whether the sample should update aggregated stats.
+  StackFrame::Type top_frame_type : 5;
+};
+
+
+#if defined(USE_SIMULATOR)
+class SimulatorHelper {
+ public:
+  // Returns true if register values were successfully retrieved
+  // from the simulator, otherwise returns false.
+  static bool FillRegisters(Isolate* isolate, v8::RegisterState* state);
+};
+#endif  // USE_SIMULATOR
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PROFILER_TICK_SAMPLE_H_
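
The TickSample machinery above is reachable from the public API through
v8::Isolate::GetStackSample. A minimal sketch of an embedder-side sampler
body, assuming the register state has already been captured elsewhere (the
helper name CollectSample is illustrative, not part of this patch):

  #include "include/v8.h"

  void CollectSample(v8::Isolate* isolate, const v8::RegisterState& regs) {
    static const size_t kMaxFrames = 255;  // TickSample::kMaxFramesCount.
    void* frames[kMaxFrames];
    v8::SampleInfo info;
    isolate->GetStackSample(regs, frames, kMaxFrames, &info);
    // frames[0..info.frames_count) now hold the sampled pcs; interpreted
    // frames are reported as addresses inside the bytecode array rather
    // than machine pcs, per TickSample::GetStackSample above.
  }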
diff --git a/src/property-details.h b/src/property-details.h
index fdf2c6c..8df7307 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -53,7 +53,18 @@
 STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(DONT_DELETE));
 STATIC_ASSERT(((SKIP_STRINGS | SKIP_SYMBOLS | ONLY_ALL_CAN_READ) &
                ALL_ATTRIBUTES_MASK) == 0);
-
+STATIC_ASSERT(ALL_PROPERTIES ==
+              static_cast<PropertyFilter>(v8::PropertyFilter::ALL_PROPERTIES));
+STATIC_ASSERT(ONLY_WRITABLE ==
+              static_cast<PropertyFilter>(v8::PropertyFilter::ONLY_WRITABLE));
+STATIC_ASSERT(ONLY_ENUMERABLE ==
+              static_cast<PropertyFilter>(v8::PropertyFilter::ONLY_ENUMERABLE));
+STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(
+                                       v8::PropertyFilter::ONLY_CONFIGURABLE));
+STATIC_ASSERT(SKIP_STRINGS ==
+              static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_STRINGS));
+STATIC_ASSERT(SKIP_SYMBOLS ==
+              static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_SYMBOLS));
 
 class Smi;
 class Type;
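
The new STATIC_ASSERTs pin the internal PropertyFilter encoding to the public
v8::PropertyFilter enum, so conversion between the two can be a plain cast.
A minimal sketch (assuming the conventional i:: alias for v8::internal):

  v8::PropertyFilter api_filter =
      static_cast<v8::PropertyFilter>(v8::ONLY_ENUMERABLE | v8::SKIP_SYMBOLS);
  // Safe: the STATIC_ASSERTs above guarantee matching numeric values.
  i::PropertyFilter filter = static_cast<i::PropertyFilter>(api_filter);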
diff --git a/src/regexp/jsregexp.cc b/src/regexp/jsregexp.cc
index ddb4a16..6c50f4e 100644
--- a/src/regexp/jsregexp.cc
+++ b/src/regexp/jsregexp.cc
@@ -5159,8 +5159,10 @@
       ranges = negated;
     }
     if (ranges->length() == 0) {
-      // No matches possible.
-      return new (zone) EndNode(EndNode::BACKTRACK, zone);
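+      // An empty character class can never match. Model it as a negated
+      // everything-class, i.e. a TextNode that always fails.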
+      ranges->Add(CharacterRange::Everything(), zone);
+      RegExpCharacterClass* fail =
+          new (zone) RegExpCharacterClass(ranges, true);
+      return new (zone) TextNode(fail, compiler->read_backward(), on_success);
     }
     if (standard_type() == '*') {
       return UnanchoredAdvance(compiler, on_success);
@@ -5879,6 +5881,7 @@
 void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
                                         ZoneList<CharacterRange>* ranges,
                                         bool is_one_byte) {
+  CharacterRange::Canonicalize(ranges);
   int range_count = ranges->length();
   for (int i = 0; i < range_count; i++) {
     CharacterRange range = ranges->at(i);
@@ -6762,7 +6765,7 @@
   Heap* heap = pattern->GetHeap();
   bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
   if (heap->total_regexp_code_generated() > RegExpImpl::kRegExpCompiledLimit &&
-      heap->isolate()->memory_allocator()->SizeExecutable() >
+      heap->memory_allocator()->SizeExecutable() >
           RegExpImpl::kRegExpExecutableMemoryLimit) {
     too_much = true;
   }
diff --git a/src/regexp/regexp-ast.h b/src/regexp/regexp-ast.h
index 0e718d3..39c9cee 100644
--- a/src/regexp/regexp-ast.h
+++ b/src/regexp/regexp-ast.h
@@ -296,7 +296,10 @@
   bool IsCharacterClass() override;
   bool IsTextElement() override { return true; }
   int min_match() override { return 1; }
-  int max_match() override { return 1; }
+  // The character class may match two code units for unicode regexps.
+  // TODO(yangguo): we should split this class for usage in TextElement, and
+  //                make max_match() dependent on the character class content.
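+  // E.g., /[\u{10400}]/u matches a single code point encoded as a surrogate
+  // pair, i.e. two uc16 code units.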
+  int max_match() override { return 2; }
   void AppendToText(RegExpText* text, Zone* zone) override;
   CharacterSet character_set() { return set_; }
   // TODO(lrn): Remove need for complex version if is_standard that
diff --git a/src/regexp/regexp-macro-assembler.cc b/src/regexp/regexp-macro-assembler.cc
index 9bb5073..7fed26e 100644
--- a/src/regexp/regexp-macro-assembler.cc
+++ b/src/regexp/regexp-macro-assembler.cc
@@ -100,6 +100,15 @@
   Bind(&ok);
 }
 
+void RegExpMacroAssembler::CheckPosition(int cp_offset,
+                                         Label* on_outside_input) {
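+  // By default, fall back to LoadCurrentCharacter with check_bounds == true,
+  // whose bounds check branches to on_outside_input when cp_offset is past
+  // the end of the input.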
+  LoadCurrentCharacter(cp_offset, on_outside_input, true);
+}
+
+bool RegExpMacroAssembler::CheckSpecialCharacterClass(uc16 type,
+                                                      Label* on_no_match) {
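+  // No backend-specific support by default; callers emit the generic
+  // character-class matching code instead.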
+  return false;
+}
 
 #ifndef V8_INTERPRETED_REGEXP  // Avoid unused code, e.g., on ARM.
 
diff --git a/src/regexp/regexp-macro-assembler.h b/src/regexp/regexp-macro-assembler.h
index 2aa439e..76efdf9 100644
--- a/src/regexp/regexp-macro-assembler.h
+++ b/src/regexp/regexp-macro-assembler.h
@@ -113,12 +113,12 @@
 
   // Checks whether the given offset from the current position is before
   // the end of the string.  May overwrite the current character.
-  virtual void CheckPosition(int cp_offset, Label* on_outside_input) = 0;
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
   // Check whether a standard/default character class matches the current
   // character. Returns false if the type of special character class does
   // not have custom support.
   // May clobber the current loaded character.
-  virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match) = 0;
+  virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
   virtual void Fail() = 0;
   virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
   virtual void GoTo(Label* label) = 0;
diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc
index d433fc8..abb644a 100644
--- a/src/regexp/regexp-parser.cc
+++ b/src/regexp/regexp-parser.cc
@@ -130,6 +130,7 @@
 
 
 RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
+  if (failed_) return NULL;  // Do not overwrite any existing error.
   failed_ = true;
   *error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
   // Zip to the end to make sure that no more input is read.
@@ -511,9 +512,8 @@
         break;
       case '{': {
         int dummy;
-        if (ParseIntervalQuantifier(&dummy, &dummy)) {
-          return ReportError(CStrVector("Nothing to repeat"));
-        }
+        bool parsed = ParseIntervalQuantifier(&dummy, &dummy CHECK_FAILED);
+        if (parsed) return ReportError(CStrVector("Nothing to repeat"));
         // fallthrough
       }
       case '}':
@@ -845,29 +845,46 @@
 }
 
 #ifdef V8_I18N_SUPPORT
-bool IsExactPropertyValueAlias(const char* property_name, UProperty property,
-                               int32_t property_value) {
-  const char* short_name =
-      u_getPropertyValueName(property, property_value, U_SHORT_PROPERTY_NAME);
+bool IsExactPropertyAlias(const char* property_name, UProperty property) {
+  const char* short_name = u_getPropertyName(property, U_SHORT_PROPERTY_NAME);
   if (short_name != NULL && strcmp(property_name, short_name) == 0) return true;
   for (int i = 0;; i++) {
-    const char* long_name = u_getPropertyValueName(
-        property, property_value,
-        static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
+    const char* long_name = u_getPropertyName(
+        property, static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
     if (long_name == NULL) break;
     if (strcmp(property_name, long_name) == 0) return true;
   }
   return false;
 }
 
-bool LookupPropertyClass(UProperty property, const char* property_name,
-                         ZoneList<CharacterRange>* result, Zone* zone) {
-  int32_t property_value = u_getPropertyValueEnum(property, property_name);
+bool IsExactPropertyValueAlias(const char* property_value_name,
+                               UProperty property, int32_t property_value) {
+  const char* short_name =
+      u_getPropertyValueName(property, property_value, U_SHORT_PROPERTY_NAME);
+  if (short_name != NULL && strcmp(property_value_name, short_name) == 0) {
+    return true;
+  }
+  for (int i = 0;; i++) {
+    const char* long_name = u_getPropertyValueName(
+        property, property_value,
+        static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
+    if (long_name == NULL) break;
+    if (strcmp(property_value_name, long_name) == 0) return true;
+  }
+  return false;
+}
+
+bool LookupPropertyValueName(UProperty property,
+                             const char* property_value_name,
+                             ZoneList<CharacterRange>* result, Zone* zone) {
+  int32_t property_value =
+      u_getPropertyValueEnum(property, property_value_name);
   if (property_value == UCHAR_INVALID_CODE) return false;
 
   // We require the property value name to match exactly one of the property
   // value aliases. However, u_getPropertyValueEnum uses loose matching.
-  if (!IsExactPropertyValueAlias(property_name, property, property_value)) {
+  if (!IsExactPropertyValueAlias(property_value_name, property,
+                                 property_value)) {
     return false;
   }
 
@@ -892,49 +909,75 @@
   uset_close(set);
   return success;
 }
-#endif  // V8_I18N_SUPPORT
 
 bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result) {
-#ifdef V8_I18N_SUPPORT
-  List<char> property_name_list;
+  // Parse the property class as follows:
+  // - \pN with a single-character N is equivalent to \p{N}
+  // - In \p{name}, 'name' is interpreted
+  //   - either as a general category property value name.
+  //   - or as a binary property name.
+  // - In \p{name=value}, 'name' is interpreted as an enumerated property name,
+  //   and 'value' is interpreted as one of the available property value names.
+  // - Aliases in PropertyAlias.txt and PropertyValueAlias.txt can be used.
+  // - Loose matching is not applied.
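+  // Examples: \pL and \p{L} (general category), \p{Alphabetic} (binary
+  // property), \p{Script=Greek} (enumerated property with a value name).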
+  List<char> first_part;
+  List<char> second_part;
   if (current() == '{') {
-    for (Advance(); current() != '}'; Advance()) {
+    // Parse \p{[PropertyName=]PropertyNameValue}
+    for (Advance(); current() != '}' && current() != '='; Advance()) {
       if (!has_next()) return false;
-      property_name_list.Add(static_cast<char>(current()));
+      first_part.Add(static_cast<char>(current()));
+    }
+    if (current() == '=') {
+      for (Advance(); current() != '}'; Advance()) {
+        if (!has_next()) return false;
+        second_part.Add(static_cast<char>(current()));
+      }
+      second_part.Add(0);  // null-terminate string.
     }
   } else if (current() != kEndMarker) {
-    property_name_list.Add(static_cast<char>(current()));
+    // Parse \pN, where N is a single-character property name value.
+    first_part.Add(static_cast<char>(current()));
   } else {
     return false;
   }
   Advance();
-  property_name_list.Add(0);  // null-terminate string.
+  first_part.Add(0);  // null-terminate string.
 
-  const char* property_name = property_name_list.ToConstVector().start();
-
-#define PROPERTY_NAME_LOOKUP(PROPERTY)                                  \
-  do {                                                                  \
-    if (LookupPropertyClass(PROPERTY, property_name, result, zone())) { \
-      return true;                                                      \
-    }                                                                   \
-  } while (false)
-
-  // General_Category (gc) found in PropertyValueAliases.txt
-  PROPERTY_NAME_LOOKUP(UCHAR_GENERAL_CATEGORY_MASK);
-  // Script (sc) found in Scripts.txt
-  PROPERTY_NAME_LOOKUP(UCHAR_SCRIPT);
-  // To disambiguate from script names, block names have an "In"-prefix.
-  if (property_name_list.length() > 3 && property_name[0] == 'I' &&
-      property_name[1] == 'n') {
-    // Block (blk) found in Blocks.txt
-    property_name += 2;
-    PROPERTY_NAME_LOOKUP(UCHAR_BLOCK);
+  if (second_part.is_empty()) {
+    // First attempt to interpret as general category property value name.
+    const char* name = first_part.ToConstVector().start();
+    if (LookupPropertyValueName(UCHAR_GENERAL_CATEGORY_MASK, name, result,
+                                zone())) {
+      return true;
+    }
+    // Then attempt to interpret as binary property name with value name 'Y'.
+    UProperty property = u_getPropertyEnum(name);
+    if (property < UCHAR_BINARY_START) return false;
+    if (property >= UCHAR_BINARY_LIMIT) return false;
+    if (!IsExactPropertyAlias(name, property)) return false;
+    return LookupPropertyValueName(property, "Y", result, zone());
+  } else {
+    // Both property name and value name are specified. Attempt to interpret
+    // the property name as enumerated property.
+    const char* property_name = first_part.ToConstVector().start();
+    const char* value_name = second_part.ToConstVector().start();
+    UProperty property = u_getPropertyEnum(property_name);
+    if (property < UCHAR_INT_START) return false;
+    if (property >= UCHAR_INT_LIMIT) return false;
+    if (!IsExactPropertyAlias(property_name, property)) return false;
+    return LookupPropertyValueName(property, value_name, result, zone());
   }
-#undef PROPERTY_NAME_LOOKUP
-#endif  // V8_I18N_SUPPORT
+}
+
+#else  // V8_I18N_SUPPORT
+
+bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result) {
   return false;
 }
 
+#endif  // V8_I18N_SUPPORT
+
 bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
   uc32 x = 0;
   int d = HexValue(current());
diff --git a/src/register-configuration.cc b/src/register-configuration.cc
index 2df825a..ab5c692 100644
--- a/src/register-configuration.cc
+++ b/src/register-configuration.cc
@@ -41,7 +41,7 @@
 
 STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
               Register::kNumRegisters);
-STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
+STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
               DoubleRegister::kMaxNumRegisters);
 
 class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
@@ -152,6 +152,8 @@
       allocatable_double_codes_(allocatable_double_codes),
       general_register_names_(general_register_names),
       double_register_names_(double_register_names) {
+  DCHECK(num_general_registers_ <= RegisterConfiguration::kMaxGeneralRegisters);
+  DCHECK(num_double_registers_ <= RegisterConfiguration::kMaxFPRegisters);
   for (int i = 0; i < num_allocatable_general_registers_; ++i) {
     allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
   }
diff --git a/src/register-configuration.h b/src/register-configuration.h
index 8ad1d78..c07106e 100644
--- a/src/register-configuration.h
+++ b/src/register-configuration.h
@@ -23,7 +23,7 @@
 
   // Architecture independent maxes.
   static const int kMaxGeneralRegisters = 32;
-  static const int kMaxDoubleRegisters = 32;
+  static const int kMaxFPRegisters = 32;
 
   static const RegisterConfiguration* ArchDefault(CompilerSelector compiler);
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index b76785d..1b571a7 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -20,6 +20,9 @@
 
 
 // Number of times a function has to be seen on the stack before it is
+// compiled for baseline.
+static const int kProfilerTicksBeforeBaseline = 2;
+// Number of times a function has to be seen on the stack before it is
 // optimized.
 static const int kProfilerTicksBeforeOptimization = 2;
 // If the function optimization was disabled due to high deoptimization count,
@@ -88,13 +91,13 @@
   }
 }
 
-
-void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
+static void TraceRecompile(JSFunction* function, const char* reason,
+                           const char* type) {
   if (FLAG_trace_opt &&
       function->shared()->PassesFilter(FLAG_hydrogen_filter)) {
     PrintF("[marking ");
     function->ShortPrint();
-    PrintF(" for recompilation, reason: %s", reason);
+    PrintF(" for %s recompilation, reason: %s", type, reason);
     if (FLAG_type_info_threshold > 0) {
       int typeinfo, generic, total, type_percentage, generic_percentage;
       GetICCounts(function->shared(), &typeinfo, &generic, &total,
@@ -105,10 +108,27 @@
     }
     PrintF("]\n");
   }
+}
 
+void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
+  TraceRecompile(function, reason, "optimized");
+
+  // TODO(4280): Fix this to check that the function is compiled to baseline
+  // once we have a standard way to check that. For now, baseline code never
+  // has a bytecode array, so check for the absence of one.
+  DCHECK(!function->shared()->HasBytecodeArray());
   function->AttemptConcurrentOptimization();
 }
 
+void RuntimeProfiler::Baseline(JSFunction* function, const char* reason) {
+  TraceRecompile(function, reason, "baseline");
+
+  // TODO(4280): Fix this to check that the function is compiled for the
+  // interpreter once we have a standard way to check that. For now, a
+  // function only has a bytecode array if compiled for the interpreter.
+  DCHECK(function->shared()->HasBytecodeArray());
+  function->MarkForBaseline();
+}
 
 void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
                                                 int loop_nesting_levels) {
@@ -235,8 +255,7 @@
   }
 }
 
-void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
-                                            bool frame_optimized) {
+void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function) {
   if (function->IsInOptimizationQueue()) return;
 
   SharedFunctionInfo* shared = function->shared();
@@ -247,48 +266,22 @@
   // TODO(rmcilroy): Consider whether we should optimize small functions when
   // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
 
-  if (!frame_optimized && (function->IsMarkedForOptimization() ||
-                           function->IsMarkedForConcurrentOptimization() ||
-                           function->IsOptimized())) {
+  if (function->IsMarkedForBaseline() || function->IsMarkedForOptimization() ||
+      function->IsMarkedForConcurrentOptimization() ||
+      function->IsOptimized()) {
     // TODO(rmcilroy): Support OSR in these cases.
-
     return;
   }
 
-  // Do not optimize non-optimizable functions.
-  if (shared->optimization_disabled()) {
-    if (shared->deopt_count() >= FLAG_max_opt_count) {
-      // If optimization was disabled due to many deoptimizations,
-      // then check if the function is hot and try to reenable optimization.
-      if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
-        shared->set_profiler_ticks(0);
-        shared->TryReenableOptimization();
-      }
-    }
+  if (shared->optimization_disabled() &&
+      shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
+    // Don't baseline functions which have been marked by NeverOptimizeFunction
+    // in a test.
     return;
   }
 
-  if (function->IsOptimized()) return;
-
-  if (ticks >= kProfilerTicksBeforeOptimization) {
-    int typeinfo, generic, total, type_percentage, generic_percentage;
-    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
-                &generic_percentage);
-    if (type_percentage >= FLAG_type_info_threshold &&
-        generic_percentage <= FLAG_generic_ic_threshold) {
-      // If this particular function hasn't had any ICs patched for enough
-      // ticks, optimize it now.
-      Optimize(function, "hot and stable");
-    } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
-      Optimize(function, "not much type info but very hot");
-    } else {
-      if (FLAG_trace_opt_verbose) {
-        PrintF("[not yet optimizing ");
-        function->PrintName();
-        PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
-               type_percentage);
-      }
-    }
+  if (ticks >= kProfilerTicksBeforeBaseline) {
+    Baseline(function, "hot enough for baseline");
   }
 }
 
@@ -320,8 +313,9 @@
       }
     }
 
-    if (FLAG_ignition) {
-      MaybeOptimizeIgnition(function, frame->is_optimized());
+    if (frame->is_interpreted()) {
+      DCHECK(!frame->is_optimized());
+      MaybeOptimizeIgnition(function);
     } else {
       MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized());
     }
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index aa2f65e..499f0be 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -8,16 +8,10 @@
 #include "src/allocation.h"
 
 namespace v8 {
-
-namespace base {
-class Semaphore;
-}
-
 namespace internal {
 
 class Isolate;
 class JSFunction;
-class Object;
 
 class RuntimeProfiler {
  public:
@@ -32,8 +26,9 @@
  private:
   void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count,
                                 bool frame_optimized);
-  void MaybeOptimizeIgnition(JSFunction* function, bool frame_optimized);
+  void MaybeOptimizeIgnition(JSFunction* function);
   void Optimize(JSFunction* function, const char* reason);
+  void Baseline(JSFunction* function, const char* reason);
 
   bool CodeSizeOKForOSR(Code* shared_code);
 
diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc
index ab436c2..519df77 100644
--- a/src/runtime/runtime-array.cc
+++ b/src/runtime/runtime-array.cc
@@ -213,7 +213,7 @@
     }
     accumulator.NextPrototype();
     Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
-    JSObject::CollectOwnElementKeys(current, &accumulator, ALL_PROPERTIES);
+    accumulator.CollectOwnElementIndices(current);
   }
   // Erase any keys >= length.
   Handle<FixedArray> keys = accumulator.GetKeys(KEEP_NUMBERS);
@@ -455,6 +455,15 @@
   return isolate->heap()->false_value();
 }
 
+// ES6 22.1.2.2 Array.isArray
+RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
+  HandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
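+  // Object::IsArray follows proxy targets per ES6 IsArray and can therefore
+  // throw (e.g. for a revoked proxy), hence MAYBE_RETURN below.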
+  Maybe<bool> result = Object::IsArray(object);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
 
 RUNTIME_FUNCTION(Runtime_IsArray) {
   SealHandleScope shs(isolate);
@@ -463,7 +472,6 @@
   return isolate->heap()->ToBoolean(obj->IsJSArray());
 }
 
-
 RUNTIME_FUNCTION(Runtime_HasCachedArrayIndex) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
diff --git a/src/runtime/runtime-atomics.cc b/src/runtime/runtime-atomics.cc
index 94d98d4..dd309f7 100644
--- a/src/runtime/runtime-atomics.cc
+++ b/src/runtime/runtime-atomics.cc
@@ -33,18 +33,6 @@
 }
 
 template <typename T>
-inline T LoadSeqCst(T* p) {
-  T result;
-  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
-  return result;
-}
-
-template <typename T>
-inline void StoreSeqCst(T* p, T value) {
-  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
 inline T AddSeqCst(T* p, T value) {
   return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
 }
@@ -116,11 +104,6 @@
     return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                               bit_cast<vctype>(newval),     \
                                               bit_cast<vctype>(oldval));    \
-  }                                                                         \
-  inline type LoadSeqCst(type* p) { return *p; }                            \
-  inline void StoreSeqCst(type* p, type value) {                            \
-    InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),               \
-                                bit_cast<vctype>(value));                   \
   }
 
 ATOMIC_OPS(int8_t, 8, char)
@@ -216,22 +199,6 @@
 
 
 template <typename T>
-inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
-  T result = LoadSeqCst(static_cast<T*>(buffer) + index);
-  return ToObject(isolate, result);
-}
-
-
-template <typename T>
-inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
-                       Handle<Object> obj) {
-  T value = FromObject<T>(obj);
-  StoreSeqCst(static_cast<T*>(buffer) + index, value);
-  return *obj;
-}
-
-
-template <typename T>
 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                      Handle<Object> obj) {
   T value = FromObject<T>(obj);
@@ -307,15 +274,6 @@
 }
 
 
-inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
-                                   Handle<Object> obj) {
-  typedef int32_t convert_type;
-  uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
-  StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
-  return *obj;
-}
-
-
 #define DO_UINT8_CLAMPED_OP(name, op)                                        \
   inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer,      \
                                         size_t index, Handle<Object> obj) {  \
@@ -365,6 +323,29 @@
   V(Uint32, uint32, UINT32, uint32_t, 4) \
   V(Int32, int32, INT32, int32_t, 4)
 
+RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex));
+}
 
 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
   HandleScope scope(isolate);
@@ -400,69 +381,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
-  CONVERT_SIZE_ARG_CHECKED(index, 1);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
-
-  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
-                    NumberToSize(isolate, sta->byte_offset());
-
-  switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
-  case kExternal##Type##Array:                              \
-    return DoLoad<ctype>(isolate, source, index);
-
-    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    case kExternalUint8ClampedArray:
-      return DoLoad<uint8_t>(isolate, source, index);
-
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_AtomicsStore) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
-  CONVERT_SIZE_ARG_CHECKED(index, 1);
-  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
-  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
-  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
-
-  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
-                    NumberToSize(isolate, sta->byte_offset());
-
-  switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
-  case kExternal##Type##Array:                              \
-    return DoStore<ctype>(isolate, source, index, value);
-
-    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    case kExternalUint8ClampedArray:
-      return DoStoreUint8Clamped(isolate, source, index, value);
-
-    default:
-      break;
-  }
-
-  UNREACHABLE();
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
diff --git a/src/runtime/runtime-classes.cc b/src/runtime/runtime-classes.cc
index 3f10225..a784d6d 100644
--- a/src/runtime/runtime-classes.cc
+++ b/src/runtime/runtime-classes.cc
@@ -194,14 +194,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, constructor, 0);
-  JSObject::MigrateSlowToFast(constructor, 0, "RuntimeToFastProperties");
-  return *constructor;
-}
-
 static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
                                          Handle<Object> receiver,
                                          Handle<JSObject> home_object,
diff --git a/src/runtime/runtime-collections.cc b/src/runtime/runtime-collections.cc
index 32340e5..65690df 100644
--- a/src/runtime/runtime-collections.cc
+++ b/src/runtime/runtime-collections.cc
@@ -368,14 +368,5 @@
   }
   return *isolate->factory()->NewJSArrayWithElements(values);
 }
-
-
-RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
-  JSWeakCollection::Initialize(weakmap, isolate);
-  return *weakmap;
-}
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index 89a6fa1..c8fc9e8 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -19,7 +19,7 @@
 
 RUNTIME_FUNCTION(Runtime_CompileLazy) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 
 #ifdef DEBUG
@@ -39,10 +39,22 @@
   return function->code();
 }
 
+RUNTIME_FUNCTION(Runtime_CompileBaseline) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  StackLimitCheck check(isolate);
+  if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+  if (!Compiler::CompileBaseline(function)) {
+    return isolate->heap()->exception();
+  }
+  DCHECK(function->is_compiled());
+  return function->code();
+}
 
 RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   StackLimitCheck check(isolate);
   if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
@@ -56,7 +68,7 @@
 
 RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   StackLimitCheck check(isolate);
   if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
@@ -305,11 +317,10 @@
   }
 }
 
-
 static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
                                  Handle<SharedFunctionInfo> outer_info,
                                  LanguageMode language_mode,
-                                 int scope_position) {
+                                 int eval_scope_position, int eval_position) {
   Handle<Context> context = Handle<Context>(isolate->context());
   Handle<Context> native_context = Handle<Context>(context->native_context());
 
@@ -331,9 +342,9 @@
   static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
   Handle<JSFunction> compiled;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, compiled,
-      Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
-                                    restriction, scope_position),
+      isolate, compiled, Compiler::GetFunctionFromEval(
+                             source, outer_info, context, language_mode,
+                             restriction, eval_scope_position, eval_position),
       isolate->heap()->exception());
   return *compiled;
 }
@@ -341,7 +352,7 @@
 
 RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 5);
+  DCHECK(args.length() == 6);
 
   Handle<Object> callee = args.at<Object>(0);
 
@@ -362,7 +373,7 @@
   Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
                                         isolate);
   return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
-                           language_mode, args.smi_at(4));
+                           language_mode, args.smi_at(4), args.smi_at(5));
 }
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index ad8375a..e3f3beb 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -42,6 +42,9 @@
   JavaScriptFrameIterator it(isolate);
   isolate->debug()->Break(it.frame());
 
+  // If live-edit has dropped frames, we are not going back to dispatch.
+  if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::FromInt(0);
+
   // Return the handler from the original bytecode array.
   DCHECK(it.frame()->is_interpreted());
   InterpretedFrame* interpreted_frame =
@@ -244,7 +247,7 @@
     Handle<JSObject> promise = Handle<JSObject>::cast(object);
 
     Handle<Object> status_obj =
-        DebugGetProperty(promise, isolate->factory()->promise_status_symbol());
+        DebugGetProperty(promise, isolate->factory()->promise_state_symbol());
     RUNTIME_ASSERT_HANDLIFIED(status_obj->IsSmi(), JSArray);
     const char* status = "rejected";
     int status_val = Handle<Smi>::cast(status_obj)->value();
@@ -267,12 +270,31 @@
     result->set(1, *status_str);
 
     Handle<Object> value_obj =
-        DebugGetProperty(promise, isolate->factory()->promise_value_symbol());
+        DebugGetProperty(promise, isolate->factory()->promise_result_symbol());
     Handle<String> promise_value =
         factory->NewStringFromAsciiChecked("[[PromiseValue]]");
     result->set(2, *promise_value);
     result->set(3, *value_obj);
     return factory->NewJSArrayWithElements(result);
+  } else if (object->IsJSProxy()) {
+    Handle<JSProxy> js_proxy = Handle<JSProxy>::cast(object);
+    Handle<FixedArray> result = factory->NewFixedArray(3 * 2);
+
+    Handle<String> handler_str =
+        factory->NewStringFromAsciiChecked("[[Handler]]");
+    result->set(0, *handler_str);
+    result->set(1, js_proxy->handler());
+
+    Handle<String> target_str =
+        factory->NewStringFromAsciiChecked("[[Target]]");
+    result->set(2, *target_str);
+    result->set(3, js_proxy->target());
+
+    Handle<String> is_revoked_str =
+        factory->NewStringFromAsciiChecked("[[IsRevoked]]");
+    result->set(4, *is_revoked_str);
+    result->set(5, isolate->heap()->ToBoolean(js_proxy->IsRevoked()));
+    return factory->NewJSArrayWithElements(result);
   } else if (object->IsJSValue()) {
     Handle<JSValue> js_value = Handle<JSValue>::cast(object);
 
@@ -339,7 +361,7 @@
     return *isolate->factory()->NewJSArrayWithElements(details);
   }
 
-  LookupIterator it(obj, name, LookupIterator::HIDDEN);
+  LookupIterator it(obj, name, LookupIterator::OWN);
   bool has_caught = false;
   Handle<Object> value = DebugGetProperty(&it, &has_caught);
   if (!it.IsFound()) return isolate->heap()->undefined_value();
@@ -410,50 +432,6 @@
 }
 
 
-// Return the property insertion index calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(Runtime_DebugPropertyIndexFromDetails) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
-  // TODO(verwaest): Works only for dictionary mode holders.
-  return Smi::FromInt(details.dictionary_index());
-}
-
-
-// Return property value from named interceptor.
-// args[0]: object
-// args[1]: property name
-RUNTIME_FUNCTION(Runtime_DebugNamedInterceptorPropertyValue) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  RUNTIME_ASSERT(obj->HasNamedInterceptor());
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     JSObject::GetProperty(obj, name));
-  return *result;
-}
-
-
-// Return element value from indexed interceptor.
-// args[0]: object
-// args[1]: index
-RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  RUNTIME_ASSERT(obj->HasIndexedInterceptor());
-  CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, JSReceiver::GetElement(isolate, obj, index));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
@@ -571,7 +549,9 @@
   for (int slot = 0; slot < scope_info->LocalCount(); ++slot) {
     // Hide compiler-introduced temporary variables, whether on the stack or on
     // the context.
-    if (scope_info->LocalIsSynthetic(slot)) local_count--;
+    if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(slot))) {
+      local_count--;
+    }
   }
 
   Handle<FixedArray> locals =
@@ -582,7 +562,7 @@
   int i = 0;
   for (; i < scope_info->StackLocalCount(); ++i) {
     // Use the value from the stack.
-    if (scope_info->LocalIsSynthetic(i)) continue;
+    if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(i))) continue;
     locals->set(local * 2, scope_info->LocalName(i));
     Handle<Object> value = frame_inspector.GetExpression(i);
     // TODO(yangguo): We convert optimized out values to {undefined} when they
@@ -596,8 +576,8 @@
     Handle<Context> context(
         Handle<Context>::cast(frame_inspector.GetContext())->closure_context());
     for (; i < scope_info->LocalCount(); ++i) {
-      if (scope_info->LocalIsSynthetic(i)) continue;
       Handle<String> name(scope_info->LocalName(i));
+      if (ScopeInfo::VariableIsSynthetic(*name)) continue;
       VariableMode mode;
       InitializationFlag init_flag;
       MaybeAssignedFlag maybe_assigned_flag;
@@ -958,78 +938,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_GetThreadCount) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
-  RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
-
-  // Count all archived V8 threads.
-  int n = 0;
-  for (ThreadState* thread = isolate->thread_manager()->FirstThreadStateInUse();
-       thread != NULL; thread = thread->Next()) {
-    n++;
-  }
-
-  // Total number of threads is current thread and archived threads.
-  return Smi::FromInt(n + 1);
-}
-
-
-static const int kThreadDetailsCurrentThreadIndex = 0;
-static const int kThreadDetailsThreadIdIndex = 1;
-static const int kThreadDetailsSize = 2;
-
-// Return an array with thread details
-// args[0]: number: break id
-// args[1]: number: thread index
-//
-// The array returned contains the following information:
-// 0: Is current thread?
-// 1: Thread id
-RUNTIME_FUNCTION(Runtime_GetThreadDetails) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
-  RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
-
-  CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
-
-  // Allocate array for result.
-  Handle<FixedArray> details =
-      isolate->factory()->NewFixedArray(kThreadDetailsSize);
-
-  // Thread index 0 is current thread.
-  if (index == 0) {
-    // Fill the details.
-    details->set(kThreadDetailsCurrentThreadIndex,
-                 isolate->heap()->true_value());
-    details->set(kThreadDetailsThreadIdIndex,
-                 Smi::FromInt(ThreadId::Current().ToInteger()));
-  } else {
-    // Find the thread with the requested index.
-    int n = 1;
-    ThreadState* thread = isolate->thread_manager()->FirstThreadStateInUse();
-    while (index != n && thread != NULL) {
-      thread = thread->Next();
-      n++;
-    }
-    if (thread == NULL) {
-      return isolate->heap()->undefined_value();
-    }
-
-    // Fill the details.
-    details->set(kThreadDetailsCurrentThreadIndex,
-                 isolate->heap()->false_value());
-    details->set(kThreadDetailsThreadIdIndex,
-                 Smi::FromInt(thread->id().ToInteger()));
-  }
-
-  // Convert to JS array and return.
-  return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
 // Sets the disable break state
 // args[0]: disable break state
 RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
@@ -1292,10 +1200,7 @@
   }
 
   // Return result as a JS array.
-  Handle<JSObject> result =
-      isolate->factory()->NewJSObject(isolate->array_function());
-  JSArray::SetContent(Handle<JSArray>::cast(result), instances);
-  return *result;
+  return *isolate->factory()->NewJSArrayWithElements(instances);
 }
 
 static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate,
@@ -1454,11 +1359,14 @@
 
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
 
+  Handle<Object> name;
   if (function->IsJSBoundFunction()) {
-    return Handle<JSBoundFunction>::cast(function)->name();
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, name, JSBoundFunction::GetName(
+                           isolate, Handle<JSBoundFunction>::cast(function)));
+  } else {
+    name = JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
   }
-  Handle<Object> name =
-      JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
   return *name;
 }
 
@@ -1598,18 +1506,9 @@
 // built-in function such as Array.forEach to enable stepping into the callback,
 // if we are indeed stepping and the callback is subject to debugging.
 RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
-  DCHECK(args.length() == 1);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
-  Handle<JSFunction> fun;
-  if (object->IsJSFunction()) {
-    fun = Handle<JSFunction>::cast(object);
-  } else {
-    fun = Handle<JSFunction>(
-        Handle<JSGeneratorObject>::cast(object)->function(), isolate);
-  }
-
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   isolate->debug()->PrepareStepIn(fun);
   return isolate->heap()->undefined_value();
 }
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index 011f9ff..56cf3b6 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -11,6 +11,7 @@
 #include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/profiler/cpu-profiler.h"
+#include "src/wasm/wasm-module.h"
 
 namespace v8 {
 namespace internal {
@@ -20,16 +21,15 @@
   DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
-  if (function->IsJSBoundFunction()) {
   Handle<Object> result;
+  if (function->IsJSBoundFunction()) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, result, JSBoundFunction::GetName(
                              isolate, Handle<JSBoundFunction>::cast(function)));
-    return *result;
   } else {
-    RUNTIME_ASSERT(function->IsJSFunction());
-    return Handle<JSFunction>::cast(function)->shared()->name();
+    result = JSFunction::GetName(isolate, Handle<JSFunction>::cast(function));
   }
+  return *result;
 }
 
 
@@ -289,10 +289,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
-  if (receiver->IsNull() || receiver->IsUndefined()) {
-    return isolate->global_proxy();
-  }
-  return *Object::ToObject(isolate, receiver).ToHandleChecked();
+  return *Object::ConvertReceiver(isolate, receiver).ToHandleChecked();
 }
 
 
@@ -314,5 +311,15 @@
              : *JSFunction::ToString(Handle<JSFunction>::cast(function));
 }
 
+RUNTIME_FUNCTION(Runtime_WasmGetFunctionName) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, wasm, 0);
+  CONVERT_SMI_ARG_CHECKED(func_index, 1);
+
+  return *wasm::GetWasmFunctionName(wasm, func_index);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-generator.cc b/src/runtime/runtime-generator.cc
index 181b5f9..7ff7fc8 100644
--- a/src/runtime/runtime-generator.cc
+++ b/src/runtime/runtime-generator.cc
@@ -14,22 +14,27 @@
 
 RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+  RUNTIME_ASSERT(function->shared()->is_resumable());
 
-  JavaScriptFrameIterator it(isolate);
-  JavaScriptFrame* frame = it.frame();
-  Handle<JSFunction> function(frame->function());
-  RUNTIME_ASSERT(function->shared()->is_generator());
+  Handle<FixedArray> operand_stack;
+  if (FLAG_ignition && FLAG_ignition_generators) {
+    int size = function->shared()->bytecode_array()->register_count();
+    operand_stack = isolate->factory()->NewFixedArray(size);
+  } else {
+    DCHECK(!function->shared()->HasBytecodeArray());
+    operand_stack = handle(isolate->heap()->empty_fixed_array());
+  }
 
-  Handle<JSGeneratorObject> generator;
-  DCHECK(!frame->IsConstructor());
-  generator = isolate->factory()->NewJSGeneratorObject(function);
+  Handle<JSGeneratorObject> generator =
+      isolate->factory()->NewJSGeneratorObject(function);
   generator->set_function(*function);
-  generator->set_context(Context::cast(frame->context()));
-  generator->set_receiver(frame->receiver());
-  generator->set_continuation(0);
-  generator->set_operand_stack(isolate->heap()->empty_fixed_array());
-
+  generator->set_context(isolate->context());
+  generator->set_receiver(*receiver);
+  generator->set_operand_stack(*operand_stack);
+  generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
   return *generator;
 }
 
@@ -41,7 +46,7 @@
 
   JavaScriptFrameIterator stack_iterator(isolate);
   JavaScriptFrame* frame = stack_iterator.frame();
-  RUNTIME_ASSERT(frame->function()->shared()->is_generator());
+  RUNTIME_ASSERT(frame->function()->shared()->is_resumable());
   DCHECK_EQ(frame->function(), generator_object->function());
   DCHECK(frame->function()->shared()->is_compiled());
   DCHECK(!frame->function()->IsOptimized());
@@ -73,62 +78,6 @@
 }
 
 
-// Note that this function is the slow path for resuming generators.  It is only
-// called if the suspended activation had operands on the stack, stack handlers
-// needing rewinding, or if the resume should throw an exception.  The fast path
-// is handled directly in FullCodeGenerator::EmitGeneratorResume(), which is
-// inlined into GeneratorNext, GeneratorReturn, and GeneratorThrow.
-// EmitGeneratorResume is called in any case, as it needs to reconstruct the
-// stack frame and make space for arguments and operands.
-RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
-  CONVERT_ARG_CHECKED(Object, value, 1);
-  CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2);
-  JavaScriptFrameIterator stack_iterator(isolate);
-  JavaScriptFrame* frame = stack_iterator.frame();
-
-  DCHECK_EQ(frame->function(), generator_object->function());
-  DCHECK(frame->function()->shared()->is_compiled());
-  DCHECK(!frame->function()->IsOptimized());
-
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-
-  Code* code = generator_object->function()->shared()->code();
-  int offset = generator_object->continuation();
-  DCHECK_GT(offset, 0);
-  frame->set_pc(code->instruction_start() + offset);
-  if (FLAG_enable_embedded_constant_pool) {
-    frame->set_constant_pool(code->constant_pool());
-  }
-  generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
-
-  FixedArray* operand_stack = generator_object->operand_stack();
-  int operands_count = operand_stack->length();
-  if (operands_count != 0) {
-    frame->RestoreOperandStack(operand_stack);
-    generator_object->set_operand_stack(isolate->heap()->empty_fixed_array());
-  }
-
-  JSGeneratorObject::ResumeMode resume_mode =
-      static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
-  switch (resume_mode) {
-    // Note: this looks like NEXT and RETURN are the same but RETURN receives
-    // special treatment in the generator code (to which we return here).
-    case JSGeneratorObject::NEXT:
-    case JSGeneratorObject::RETURN:
-      return value;
-    case JSGeneratorObject::THROW:
-      return isolate->Throw(value);
-  }
-
-  UNREACHABLE();
-  return isolate->ThrowIllegalOperation();
-}
-
-
 RUNTIME_FUNCTION(Runtime_GeneratorClose) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -170,7 +119,26 @@
 }
 
 
-// Returns generator continuation as a PC offset, or the magic -1 or 0 values.
+// Returns resume mode of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  return Smi::FromInt(generator->resume_mode());
+}
+
+
+RUNTIME_FUNCTION(Runtime_GeneratorSetContext) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  generator->set_context(isolate->context());
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -180,6 +148,45 @@
 }
 
 
+RUNTIME_FUNCTION(Runtime_GeneratorSetContinuation) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+  CONVERT_SMI_ARG_CHECKED(continuation, 1);
+
+  generator->set_continuation(continuation);
+  return isolate->heap()->undefined_value();
+}
+
+
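+// While a generator is suspended, Ignition keeps its register file in the
+// generator's operand_stack array; the two runtime functions below read and
+// write individual registers of that file.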
+RUNTIME_FUNCTION(Runtime_GeneratorLoadRegister) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
+
+  DCHECK(FLAG_ignition && FLAG_ignition_generators);
+  DCHECK(generator->function()->shared()->HasBytecodeArray());
+
+  return generator->operand_stack()->get(index);
+}
+
+
+RUNTIME_FUNCTION(Runtime_GeneratorStoreRegister) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+
+  DCHECK(FLAG_ignition && FLAG_ignition_generators);
+  DCHECK(generator->function()->shared()->HasBytecodeArray());
+
+  generator->operand_stack()->set(index, *value);
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -195,23 +202,5 @@
   return isolate->heap()->undefined_value();
 }
 
-// Optimization for builtins calling any of the following three functions is
-// disabled in js/generator.js and compiler.cc, hence they are unreachable.
-
-RUNTIME_FUNCTION(Runtime_GeneratorNext) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-RUNTIME_FUNCTION(Runtime_GeneratorReturn) {
-  UNREACHABLE();
-  return nullptr;
-}
-
-RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
-  UNREACHABLE();
-  return nullptr;
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-i18n.cc b/src/runtime/runtime-i18n.cc
index 27f970b..14974e8 100644
--- a/src/runtime/runtime-i18n.cc
+++ b/src/runtime/runtime-i18n.cc
@@ -24,21 +24,42 @@
 #include "unicode/dtfmtsym.h"
 #include "unicode/dtptngen.h"
 #include "unicode/locid.h"
+#include "unicode/normalizer2.h"
 #include "unicode/numfmt.h"
 #include "unicode/numsys.h"
 #include "unicode/rbbi.h"
 #include "unicode/smpdtfmt.h"
 #include "unicode/timezone.h"
+#include "unicode/translit.h"
 #include "unicode/uchar.h"
 #include "unicode/ucol.h"
 #include "unicode/ucurr.h"
 #include "unicode/uloc.h"
+#include "unicode/unistr.h"
 #include "unicode/unum.h"
 #include "unicode/uversion.h"
 
 
 namespace v8 {
 namespace internal {
+namespace {
+
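+// Returns a UChar view of |flat|. Two-byte strings are used in place;
+// one-byte strings are copied into |dest| (once) and the copy is returned.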
+const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
+                                    base::SmartArrayPointer<uc16>* dest,
+                                    int32_t length) {
+  DCHECK(flat.IsFlat());
+  if (flat.IsOneByte()) {
+    if (dest->is_empty()) {
+      dest->Reset(NewArray<uc16>(length));
+      CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
+    }
+    return reinterpret_cast<const UChar*>(dest->get());
+  } else {
+    return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
+  }
+}
+
+}  // namespace
 
 RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
   HandleScope scope(isolate);
@@ -336,9 +357,9 @@
 
   // Make object handle weak so we can delete the data format once GC kicks in.
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
-  GlobalHandles::MakeWeak(wrapper.location(),
-                          reinterpret_cast<void*>(wrapper.location()),
-                          DateFormat::DeleteDateFormat);
+  GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+                          DateFormat::DeleteDateFormat,
+                          WeakCallbackType::kInternalFields);
   return *local_object;
 }
 
@@ -430,9 +451,9 @@
   JSObject::AddProperty(local_object, key, value, NONE);
 
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
-  GlobalHandles::MakeWeak(wrapper.location(),
-                          reinterpret_cast<void*>(wrapper.location()),
-                          NumberFormat::DeleteNumberFormat);
+  GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+                          NumberFormat::DeleteNumberFormat,
+                          WeakCallbackType::kInternalFields);
   return *local_object;
 }
 
@@ -536,9 +557,9 @@
   JSObject::AddProperty(local_object, key, value, NONE);
 
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
-  GlobalHandles::MakeWeak(wrapper.location(),
-                          reinterpret_cast<void*>(wrapper.location()),
-                          Collator::DeleteCollator);
+  GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+                          Collator::DeleteCollator,
+                          WeakCallbackType::kInternalFields);
   return *local_object;
 }
 
@@ -555,14 +576,20 @@
   icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
   if (!collator) return isolate->ThrowIllegalOperation();
 
-  v8::String::Value string_value1(v8::Utils::ToLocal(string1));
-  v8::String::Value string_value2(v8::Utils::ToLocal(string2));
-  const UChar* u_string1 = reinterpret_cast<const UChar*>(*string_value1);
-  const UChar* u_string2 = reinterpret_cast<const UChar*>(*string_value2);
+  string1 = String::Flatten(string1);
+  string2 = String::Flatten(string2);
+  DisallowHeapAllocation no_gc;
+  int32_t length1 = string1->length();
+  int32_t length2 = string2->length();
+  String::FlatContent flat1 = string1->GetFlatContent();
+  String::FlatContent flat2 = string2->GetFlatContent();
+  base::SmartArrayPointer<uc16> sap1;
+  base::SmartArrayPointer<uc16> sap2;
+  const UChar* string_val1 = GetUCharBufferFromFlat(flat1, &sap1, length1);
+  const UChar* string_val2 = GetUCharBufferFromFlat(flat2, &sap2, length2);
   UErrorCode status = U_ZERO_ERROR;
   UCollationResult result =
-      collator->compare(u_string1, string_value1.length(), u_string2,
-                        string_value2.length(), status);
+      collator->compare(string_val1, length1, string_val2, length2, status);
   if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
 
   return *isolate->factory()->NewNumberFromInt(result);
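With both strings flattened and exposed as UChar buffers, the comparison collapses to a single ICU call. A self-contained sketch of the same comparison against the public ICU C++ API (locale handling simplified; the V8 code unpacks a pre-built collator instead of creating one per call):

    #include <memory>
    #include <unicode/coll.h>

    // Locale-aware comparison of two UTF-16 strings; returns <0, 0 or >0.
    int CompareWithCollator(const UChar* s1, int32_t len1, const UChar* s2,
                            int32_t len2, const char* locale_name) {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::Collator> collator(
          icu::Collator::createInstance(icu::Locale(locale_name), status));
      if (U_FAILURE(status)) return 0;  // Real code would signal the error.
      UCollationResult result = collator->compare(s1, len1, s2, len2, status);
      return U_FAILURE(status) ? 0 : static_cast<int>(result);
    }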
@@ -571,25 +598,51 @@
 
 RUNTIME_FUNCTION(Runtime_StringNormalize) {
   HandleScope scope(isolate);
-  static const UNormalizationMode normalizationForms[] = {
-      UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD};
+  static const struct {
+    const char* name;
+    UNormalization2Mode mode;
+  } normalizationForms[] = {
+      {"nfc", UNORM2_COMPOSE},
+      {"nfc", UNORM2_DECOMPOSE},
+      {"nfkc", UNORM2_COMPOSE},
+      {"nfkc", UNORM2_DECOMPOSE},
+  };
 
   DCHECK(args.length() == 2);
 
-  CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
   RUNTIME_ASSERT(form_id >= 0 &&
                  static_cast<size_t>(form_id) < arraysize(normalizationForms));
 
-  v8::String::Value string_value(v8::Utils::ToLocal(stringValue));
-  const UChar* u_value = reinterpret_cast<const UChar*>(*string_value);
-
-  // TODO(mnita): check Normalizer2 (not available in ICU 46)
-  UErrorCode status = U_ZERO_ERROR;
-  icu::UnicodeString input(false, u_value, string_value.length());
+  int length = s->length();
+  s = String::Flatten(s);
   icu::UnicodeString result;
-  icu::Normalizer::normalize(input, normalizationForms[form_id], 0, result,
-                             status);
+  base::SmartArrayPointer<uc16> sap;
+  UErrorCode status = U_ZERO_ERROR;
+  {
+    DisallowHeapAllocation no_gc;
+    String::FlatContent flat = s->GetFlatContent();
+    const UChar* src = GetUCharBufferFromFlat(flat, &sap, length);
+    icu::UnicodeString input(false, src, length);
+    // |normalizer| is a singleton owned by ICU and must not be freed.
+    const icu::Normalizer2* normalizer =
+        icu::Normalizer2::getInstance(nullptr, normalizationForms[form_id].name,
+                                      normalizationForms[form_id].mode, status);
+    DCHECK(U_SUCCESS(status));
+    RUNTIME_ASSERT(normalizer != nullptr);
+    int32_t normalized_prefix_length =
+        normalizer->spanQuickCheckYes(input, status);
+    // Quick return if the input is already normalized.
+    if (length == normalized_prefix_length) return *s;
+    icu::UnicodeString unnormalized =
+        input.tempSubString(normalized_prefix_length);
+    // Read-only alias of the normalized prefix.
+    result.setTo(false, input.getBuffer(), normalized_prefix_length);
+    // |result| is copy-on-write; normalize the suffix and append it.
+    normalizer->normalizeSecondAndAppend(result, unnormalized, status);
+  }
+
   if (U_FAILURE(status)) {
     return isolate->heap()->undefined_value();
   }
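The rewritten Runtime_StringNormalize leans on two Normalizer2 features: spanQuickCheckYes returns the length of the longest prefix that is already normalized, so fully normalized input is returned without any allocation, and normalizeSecondAndAppend normalizes only the remaining suffix onto the aliased prefix. A standalone sketch of that quick-check-then-append idiom over the public ICU API, hard-wired to NFC via getNFCInstance (ICU 49+) where the diff uses the generic getInstance lookup:

    #include <unicode/normalizer2.h>
    #include <unicode/unistr.h>

    // Normalizes |input| to NFC, copying only the part that needs work.
    icu::UnicodeString ToNFC(const icu::UnicodeString& input,
                             UErrorCode& status) {
      const icu::Normalizer2* nfc = icu::Normalizer2::getNFCInstance(status);
      if (U_FAILURE(status)) return input;
      int32_t ok_prefix = nfc->spanQuickCheckYes(input, status);
      if (ok_prefix == input.length()) return input;  // Already normalized.
      // Read-only alias of the good prefix; appending is copy-on-write.
      icu::UnicodeString result;
      result.setTo(false, input.getBuffer(), ok_prefix);
      nfc->normalizeSecondAndAppend(result, input.tempSubString(ok_prefix),
                                    status);
      return result;
    }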
@@ -640,9 +693,9 @@
   // Make object handle weak so we can delete the break iterator once GC kicks
   // in.
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
-  GlobalHandles::MakeWeak(wrapper.location(),
-                          reinterpret_cast<void*>(wrapper.location()),
-                          BreakIterator::DeleteBreakIterator);
+  GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+                          BreakIterator::DeleteBreakIterator,
+                          WeakCallbackType::kInternalFields);
   return *local_object;
 }
 
@@ -663,9 +716,13 @@
       break_iterator_holder->GetInternalField(1));
   delete u_text;
 
-  v8::String::Value text_value(v8::Utils::ToLocal(text));
-  u_text = new icu::UnicodeString(reinterpret_cast<const UChar*>(*text_value),
-                                  text_value.length());
+  int length = text->length();
+  text = String::Flatten(text);
+  DisallowHeapAllocation no_gc;
+  String::FlatContent flat = text->GetFlatContent();
+  base::SmartArrayPointer<uc16> sap;
+  const UChar* text_value = GetUCharBufferFromFlat(flat, &sap, length);
+  u_text = new icu::UnicodeString(text_value, length);
   break_iterator_holder->SetInternalField(1, reinterpret_cast<Smi*>(u_text));
 
   break_iterator->setText(*u_text);
@@ -749,6 +806,345 @@
     return *isolate->factory()->NewStringFromStaticChars("unknown");
   }
 }
+
+namespace {
+void ConvertCaseWithTransliterator(icu::UnicodeString* input,
+                                   const char* transliterator_id) {
+  UErrorCode status = U_ZERO_ERROR;
+  base::SmartPointer<icu::Transliterator> translit(
+      icu::Transliterator::createInstance(
+          icu::UnicodeString(transliterator_id, -1, US_INV), UTRANS_FORWARD,
+          status));
+  if (U_FAILURE(status)) return;
+  translit->transliterate(*input);
+}
+
+MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
+                                          bool is_to_upper, const char* lang) {
+  int32_t src_length = s->length();
+
+  // Greek uppercasing has to be done via transliteration.
+  // TODO(jshin): Drop this special-casing once ICU's regular case conversion
+  // API supports Greek uppercasing. See
+  // http://bugs.icu-project.org/trac/ticket/10582 .
+  // In the meantime, if there's no Greek character in |s|, call this
+  // function again with the root locale (lang="").
+  // ICU's C API for transliteration is unwieldy, so we just use the C++ API.
+  if (V8_UNLIKELY(is_to_upper && lang[0] == 'e' && lang[1] == 'l')) {
+    icu::UnicodeString converted;
+    base::SmartArrayPointer<uc16> sap;
+    {
+      DisallowHeapAllocation no_gc;
+      String::FlatContent flat = s->GetFlatContent();
+      const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
+      // |converted| starts as a read-only alias of the source string (with
+      // copy-on-write semantics) and is modified to hold the converted result.
+      // Using a read-only alias at first saves one copy operation if
+      // transliteration does not change the input, which is rather rare.
+      // Moreover, transliteration takes rather long, so saving one copy
+      // helps only a little.
+      converted.setTo(false, src, src_length);
+      ConvertCaseWithTransliterator(&converted, "el-Upper");
+      // If no change is made, just return |s|.
+      if (converted.getBuffer() == src) return *s;
+    }
+    Handle<String> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result,
+        isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+            reinterpret_cast<const uint16_t*>(converted.getBuffer()),
+            converted.length())));
+    return *result;
+  }
+
+  auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
+
+  int32_t dest_length = src_length;
+  UErrorCode status;
+  Handle<SeqTwoByteString> result;
+  base::SmartArrayPointer<uc16> sap;
+
+  // This is not a real loop. It'll be executed only once (no overflow) or
+  // twice (overflow).
+  for (int i = 0; i < 2; ++i) {
+    result =
+        isolate->factory()->NewRawTwoByteString(dest_length).ToHandleChecked();
+    DisallowHeapAllocation no_gc;
+    String::FlatContent flat = s->GetFlatContent();
+    const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
+    status = U_ZERO_ERROR;
+    dest_length = case_converter(reinterpret_cast<UChar*>(result->GetChars()),
+                                 dest_length, src, src_length, lang, &status);
+    if (status != U_BUFFER_OVERFLOW_ERROR) break;
+  }
+
+  // In most cases, the output fills the destination buffer completely,
+  // leading to an unterminated string (U_STRING_NOT_TERMINATED_WARNING).
+  // Only in rare cases will it be shorter than the destination buffer, in
+  // which case |result| has to be truncated.
+  DCHECK(U_SUCCESS(status));
+  if (V8_LIKELY(status == U_STRING_NOT_TERMINATED_WARNING)) {
+    DCHECK(dest_length == result->length());
+    return *result;
+  }
+  if (U_SUCCESS(status)) {
+    DCHECK(dest_length < result->length());
+    return *Handle<SeqTwoByteString>::cast(
+        SeqString::Truncate(result, dest_length));
+  }
+  return *s;
+}
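The "not a real loop" above is ICU's standard pre-flighting pattern: call the case-mapping function assuming the output length equals the input length, and if it reports U_BUFFER_OVERFLOW_ERROR, retry exactly once with the required length it returned. A standalone sketch of the same two-pass shape against u_strToUpper, with the buffer handling simplified to std::u16string:

    #include <string>
    #include <unicode/ustring.h>

    // Upper-cases |src| for |locale|, growing the buffer at most once.
    std::u16string ToUpper(const std::u16string& src, const char* locale) {
      std::u16string dest;
      if (src.empty()) return dest;
      int32_t dest_length = static_cast<int32_t>(src.size());
      for (int i = 0; i < 2; ++i) {  // Runs once, or twice on overflow.
        dest.resize(dest_length);
        UErrorCode status = U_ZERO_ERROR;
        dest_length = u_strToUpper(
            reinterpret_cast<UChar*>(&dest[0]),
            static_cast<int32_t>(dest.size()),
            reinterpret_cast<const UChar*>(src.data()),
            static_cast<int32_t>(src.size()), locale, &status);
        if (status != U_BUFFER_OVERFLOW_ERROR) {
          // Success (possibly with a shorter result) or a hard failure.
          dest.resize(U_SUCCESS(status) ? dest_length : 0);
          break;
        }
      }
      return dest;
    }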
+
+inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
+
+const uint8_t kToLower[256] = {
+    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+    0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+    0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
+    0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
+    0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+    0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73,
+    0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+    0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
+    0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+    0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
+    0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+    0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B,
+    0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+    0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3,
+    0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+    0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB,
+    0xEC, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xD7,
+    0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3,
+    0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+    0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB,
+    0xFC, 0xFD, 0xFE, 0xFF,
+};
+
+inline uint16_t ToLatin1Lower(uint16_t ch) {
+  return static_cast<uint16_t>(kToLower[ch]);
+}
+
+inline uint16_t ToASCIIUpper(uint16_t ch) {
+  return ch & ~((ch >= 'a' && ch <= 'z') << 5);
+}
+
+// Does not work for U+00DF (sharp s), U+00B5 (micro sign) or U+00FF.
+inline uint16_t ToLatin1Upper(uint16_t ch) {
+  DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
+  return ch &
+         ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xE7))
+           << 5);
+}
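ToASCIIUpper and ToLatin1Upper both exploit the Latin-1 layout in which a lower-case letter differs from its upper-case partner only in bit 5 (0x20): 'a' is 0x61 and 'A' is 0x41. The comparison result (0 or 1) is shifted up to 0x20 and used as a branch-free mask. A tiny worked example of the ASCII variant:

    #include <cassert>
    #include <cstdint>

    // Clears bit 5 (0x20) exactly when |ch| is an ASCII lower-case letter.
    inline uint16_t AsciiToUpper(uint16_t ch) {
      return ch & ~((ch >= 'a' && ch <= 'z') << 5);
    }

    int main() {
      assert(AsciiToUpper('a') == 'A');  // 0x61 & ~0x20 == 0x41.
      assert(AsciiToUpper('A') == 'A');  // Mask is 0; upper case stays put.
      assert(AsciiToUpper('5') == '5');  // Non-letters pass through.
      return 0;
    }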
+
+template <typename Char>
+bool ToUpperFastASCII(const Vector<const Char>& src,
+                      Handle<SeqOneByteString> result) {
+  // Do a faster loop for the case where all the characters are ASCII.
+  uint16_t ored = 0;
+  int32_t index = 0;
+  for (auto it = src.begin(); it != src.end(); ++it) {
+    uint16_t ch = static_cast<uint16_t>(*it);
+    ored |= ch;
+    result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
+  }
+  return !(ored & ~0x7F);
+}
+
+const uint16_t sharp_s = 0xDF;
+
+template <typename Char>
+bool ToUpperOneByte(const Vector<const Char>& src,
+                    Handle<SeqOneByteString> result, int* sharp_s_count) {
+  // Still a pretty fast path for input with non-ASCII Latin-1 characters.
+
+  // There are two special cases:
+  //  1. U+00B5 and U+00FF are mapped to characters beyond U+00FF.
+  //  2. Lower-case sharp s converts to "SS" (two characters).
+  *sharp_s_count = 0;
+  int32_t index = 0;
+  for (auto it = src.begin(); it != src.end(); ++it) {
+    uint16_t ch = static_cast<uint16_t>(*it);
+    if (V8_UNLIKELY(ch == sharp_s)) {
+      ++(*sharp_s_count);
+      continue;
+    }
+    if (V8_UNLIKELY(ch == 0xB5 || ch == 0xFF)) {
+      // Since this upper-cased character does not fit in an 8-bit string, we
+      // need to take the 16-bit path.
+      return false;
+    }
+    result->SeqOneByteStringSet(index++, ToLatin1Upper(ch));
+  }
+
+  return true;
+}
+
+template <typename Char>
+void ToUpperWithSharpS(const Vector<const Char>& src,
+                       Handle<SeqOneByteString> result) {
+  int32_t dest_index = 0;
+  for (auto it = src.begin(); it != src.end(); ++it) {
+    uint16_t ch = static_cast<uint16_t>(*it);
+    if (ch == sharp_s) {
+      result->SeqOneByteStringSet(dest_index++, 'S');
+      result->SeqOneByteStringSet(dest_index++, 'S');
+    } else {
+      result->SeqOneByteStringSet(dest_index++, ToLatin1Upper(ch));
+    }
+  }
+}
+
+}  // namespace
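The sharp-s handling in this namespace is a two-pass scheme: ToUpperOneByte counts occurrences of U+00DF while converting everything else in place, and only when the count is non-zero is a wider buffer allocated for ToUpperWithSharpS, which expands each sharp s to "SS". A compact standalone sketch of that count-then-expand pattern (case mapping simplified to plain ASCII apart from the expansion):

    #include <algorithm>
    #include <string>

    // Upper-cases a Latin-1 string where one character (0xDF, sharp s)
    // expands to two output characters ("SS").
    std::string UpperWithSharpS(const std::string& src) {
      // Pass 1: count expansions so the output is allocated exactly once.
      size_t sharp_s_count =
          std::count(src.begin(), src.end(), static_cast<char>(0xDF));
      std::string result;
      result.reserve(src.size() + sharp_s_count);
      // Pass 2: convert, expanding sharp s as we go.
      for (unsigned char ch : src) {
        if (ch == 0xDF) {
          result += "SS";
        } else if (ch >= 'a' && ch <= 'z') {
          result += static_cast<char>(ch - 0x20);
        } else {
          result += static_cast<char>(ch);
        }
      }
      return result;
    }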
+
+RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+
+  int length = s->length();
+  s = String::Flatten(s);
+  // First scan the string for uppercase and non-ASCII characters:
+  if (s->HasOnlyOneByteChars()) {
+    unsigned first_index_to_lower = length;
+    for (int index = 0; index < length; ++index) {
+      // Blink specializes this path for one-byte strings, so it
+      // does not need to do a generic get, but can do the equivalent
+      // of SeqOneByteStringGet.
+      uint16_t ch = s->Get(index);
+      if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+        first_index_to_lower = index;
+        break;
+      }
+    }
+
+    // Nothing to do if the string is all ASCII with no uppercase.
+    if (first_index_to_lower == length) return *s;
+
+    // We depend here on two invariants: the length of a Latin-1 string
+    // is unchanged by ToLowerCase, and the result always fits in the
+    // Latin-1 range in the *root locale*. Neither holds for ToUpperCase,
+    // even in the root locale.
+    Handle<SeqOneByteString> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, isolate->factory()->NewRawOneByteString(length));
+
+    DisallowHeapAllocation no_gc;
+    String::FlatContent flat = s->GetFlatContent();
+    if (flat.IsOneByte()) {
+      const uint8_t* src = flat.ToOneByteVector().start();
+      CopyChars(result->GetChars(), src, first_index_to_lower);
+      for (int index = first_index_to_lower; index < length; ++index) {
+        uint16_t ch = static_cast<uint16_t>(src[index]);
+        result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
+      }
+    } else {
+      const uint16_t* src = flat.ToUC16Vector().start();
+      CopyChars(result->GetChars(), src, first_index_to_lower);
+      for (int index = first_index_to_lower; index < length; ++index) {
+        uint16_t ch = src[index];
+        result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
+      }
+    }
+
+    return *result;
+  }
+
+  // Blink had an additional case here for ASCII 2-byte strings, but
+  // that is subsumed by the above code (assuming there isn't a false
+  // negative for HasOnlyOneByteChars).
+
+  // Do a slower implementation for cases that include non-ASCII characters.
+  return LocaleConvertCase(s, isolate, false, "");
+}
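The one-byte fast path above scans for the first character that actually needs lowering, bulk-copies the untouched prefix, and table-lowers only the tail; if the scan falls off the end, the original string is returned with no allocation at all. A minimal sketch of that scan-copy-convert shape, with the Latin-1 table replaced by plain ASCII lowering:

    #include <cctype>
    #include <string>

    // Returns |src| unchanged when nothing needs lowering; otherwise copies
    // the clean prefix verbatim and converts from the first offender on.
    std::string ToLowerFastPath(const std::string& src) {
      size_t first = 0;
      while (first < src.size() &&
             !(src[first] >= 'A' && src[first] <= 'Z')) {
        ++first;
      }
      if (first == src.size()) return src;  // All clean: no conversion pass.
      std::string result(src, 0, first);    // Untouched prefix.
      for (size_t i = first; i < src.size(); ++i) {
        result += static_cast<char>(
            std::tolower(static_cast<unsigned char>(src[i])));
      }
      return result;
    }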
+
+RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+
+  // This function could be optimized for no-op cases the way its lowercase
+  // counterpart is, but in empirical testing few actual calls to upper()
+  // are no-ops, so pre-scanning would not be worth the extra time.
+
+  int32_t length = s->length();
+  s = String::Flatten(s);
+
+  if (s->HasOnlyOneByteChars()) {
+    Handle<SeqOneByteString> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, isolate->factory()->NewRawOneByteString(length));
+
+    int sharp_s_count;
+    bool is_result_single_byte;
+    {
+      DisallowHeapAllocation no_gc;
+      String::FlatContent flat = s->GetFlatContent();
+      // If it were OK to slow down ASCII-only input slightly, ToUpperFastASCII
+      // could be removed, because ToUpperOneByte is pretty fast now (it no
+      // longer calls into the ICU API).
+      if (flat.IsOneByte()) {
+        Vector<const uint8_t> src = flat.ToOneByteVector();
+        if (ToUpperFastASCII(src, result)) return *result;
+        is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+      } else {
+        DCHECK(flat.IsTwoByte());
+        Vector<const uint16_t> src = flat.ToUC16Vector();
+        if (ToUpperFastASCII(src, result)) return *result;
+        is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+      }
+    }
+
+    // Go to the full Unicode path if there are characters whose uppercase
+    // is beyond the Latin-1 range (cannot be represented in OneByteString).
+    if (V8_UNLIKELY(!is_result_single_byte)) {
+      return LocaleConvertCase(s, isolate, true, "");
+    }
+
+    if (sharp_s_count == 0) return *result;
+
+    // We have sharp_s_count sharp-s characters, but the result is still
+    // in the Latin-1 range.
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result,
+        isolate->factory()->NewRawOneByteString(length + sharp_s_count));
+    DisallowHeapAllocation no_gc;
+    String::FlatContent flat = s->GetFlatContent();
+    if (flat.IsOneByte()) {
+      ToUpperWithSharpS(flat.ToOneByteVector(), result);
+    } else {
+      ToUpperWithSharpS(flat.ToUC16Vector(), result);
+    }
+
+    return *result;
+  }
+
+  return LocaleConvertCase(s, isolate, true, "");
+}
+
+RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 3);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(is_upper, 1);
+  CONVERT_ARG_HANDLE_CHECKED(SeqOneByteString, lang, 2);
+
+  // All the languages requiring special handling ("az", "el", "lt", "tr")
+  // have a 2-letter language code.
+  DCHECK(lang->length() == 2);
+  uint8_t lang_str[3];
+  memcpy(lang_str, lang->GetChars(), 2);
+  lang_str[2] = 0;
+  s = String::Flatten(s);
+  // TODO(jshin): Consider adding a fast path for ASCII or Latin-1. The fast
+  // path for the root locale needs to be adjusted for az, lt and tr because
+  // even the case mapping of ASCII-range characters differs in those locales.
+  // Greek (el) does not require any adjustment, though.
+  return LocaleConvertCase(s, isolate, is_upper,
+                           reinterpret_cast<const char*>(lang_str));
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc
index d871fc7..f805fdb 100644
--- a/src/runtime/runtime-internal.cc
+++ b/src/runtime/runtime-internal.cc
@@ -96,6 +96,59 @@
   return isolate->StackOverflow();
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_SMI_ARG_CHECKED(message_id, 0);
+  CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
+  Handle<Object> error_obj = isolate->factory()->NewError(
+      static_cast<MessageTemplate::Template>(message_id));
+
+  // For wasm traps, the byte offset (a.k.a. source position) cannot be
+  // determined from relocation info, since the explicit checks for traps
+  // converge in one single block which calls this runtime function.
+  // We hence pass the byte offset explicitly and patch it into the topmost
+  // frame (a wasm frame) on the collected stack trace.
+  // TODO(wasm): This implementation is temporary, see bug #5007:
+  // https://bugs.chromium.org/p/v8/issues/detail?id=5007
+  Handle<JSObject> error = Handle<JSObject>::cast(error_obj);
+  Handle<Object> stack_trace_obj = JSReceiver::GetDataProperty(
+      error, isolate->factory()->stack_trace_symbol());
+  // Patch the stack trace (array of <receiver, function, code, position>).
+  if (stack_trace_obj->IsJSArray()) {
+    Handle<FixedArray> stack_elements(
+        FixedArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
+    DCHECK_EQ(1, stack_elements->length() % 4);
+    DCHECK(Code::cast(stack_elements->get(3))->kind() == Code::WASM_FUNCTION);
+    DCHECK(stack_elements->get(4)->IsSmi() &&
+           Smi::cast(stack_elements->get(4))->value() >= 0);
+    stack_elements->set(4, Smi::FromInt(-1 - byte_offset));
+  }
+  Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
+      error, isolate->factory()->detailed_stack_trace_symbol());
+  // Patch the detailed stack trace (array of JSObjects with various
+  // properties).
+  if (detailed_stack_trace_obj->IsJSArray()) {
+    Handle<FixedArray> stack_elements(
+        FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
+    DCHECK_GE(stack_elements->length(), 1);
+    Handle<JSObject> top_frame(JSObject::cast(stack_elements->get(0)));
+    Handle<String> wasm_offset_key =
+        isolate->factory()->InternalizeOneByteString(
+            STATIC_CHAR_VECTOR("column"));
+    LookupIterator it(top_frame, wasm_offset_key, top_frame,
+                      LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+    if (it.IsFound()) {
+      DCHECK(JSReceiver::GetDataProperty(&it)->IsSmi());
+      Maybe<bool> data_set = JSReceiver::SetDataProperty(
+          &it, handle(Smi::FromInt(byte_offset), isolate));
+      DCHECK(data_set.IsJust() && data_set.FromJust() == true);
+      USE(data_set);
+    }
+  }
+
+  return isolate->Throw(*error_obj);
+}
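The sentinel arithmetic used when patching the simple stack trace is worth spelling out: ordinary positions are non-negative code offsets, so storing the wasm byte offset as -1 - byte_offset guarantees a negative, unambiguous value, and the same expression recovers the original. A one-file illustration:

    #include <cassert>

    int main() {
      // Encoding used above: negative slot values flag "wasm byte offset".
      int byte_offset = 42;
      int encoded = -1 - byte_offset;  // -43; always < 0 for offsets >= 0.
      assert(encoded < 0);
      int decoded = -1 - encoded;      // The mapping is an involution.
      assert(decoded == byte_offset);
      return 0;
    }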
 
 RUNTIME_FUNCTION(Runtime_UnwindAndFindExceptionHandler) {
   SealHandleScope shs(isolate);
@@ -160,6 +213,15 @@
       isolate, NewTypeError(MessageTemplate::kIllegalInvocation));
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowIncompatibleMethodReceiver) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, arg1, 1);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, arg0, arg1));
+}
 
 RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
   HandleScope scope(isolate);
@@ -170,6 +232,12 @@
       NewTypeError(MessageTemplate::kIteratorResultNotAnObject, value));
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowGeneratorRunning) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kGeneratorRunning));
+}
 
 RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
   HandleScope scope(isolate);
@@ -309,15 +377,15 @@
   return *result;
 }
 
-#define CALLSITE_GET(NAME, RETURN)                          \
-  RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) {            \
-    HandleScope scope(isolate);                             \
-    DCHECK(args.length() == 1);                             \
-    CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
-    Handle<String> result;                                  \
-    CallSite call_site(isolate, call_site_obj);             \
-    RUNTIME_ASSERT(call_site.IsValid());                    \
-    return RETURN(call_site.NAME(), isolate);               \
+#define CALLSITE_GET(NAME, RETURN)                                  \
+  RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) {                    \
+    HandleScope scope(isolate);                                     \
+    DCHECK(args.length() == 1);                                     \
+    CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0);         \
+    Handle<String> result;                                          \
+    CallSite call_site(isolate, call_site_obj);                     \
+    RUNTIME_ASSERT(call_site.IsJavaScript() || call_site.IsWasm()); \
+    return RETURN(call_site.NAME(), isolate);                       \
   }
 
 static inline Object* ReturnDereferencedHandle(Handle<Object> obj,
@@ -416,6 +484,13 @@
       isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callsite));
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowCalledOnNullOrUndefined) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, name));
+}
 
 RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
   HandleScope scope(isolate);
@@ -456,21 +531,75 @@
   return isolate->heap()->undefined_value();
 }
 
-RUNTIME_FUNCTION(Runtime_GetOrdinaryHasInstance) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(0, args.length());
-
-  return isolate->native_context()->ordinary_has_instance();
-}
-
 RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
   HandleScope scope(isolate);
-  DCHECK_EQ(0, args.length());
-  std::stringstream stats_stream;
-  isolate->counters()->runtime_call_stats()->Print(stats_stream);
-  Handle<String> result =
-      isolate->factory()->NewStringFromAsciiChecked(stats_stream.str().c_str());
-  isolate->counters()->runtime_call_stats()->Reset();
+  if (args.length() == 0) {
+    // Without arguments, the result is returned as a string.
+    DCHECK_EQ(0, args.length());
+    std::stringstream stats_stream;
+    isolate->counters()->runtime_call_stats()->Print(stats_stream);
+    Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(
+        stats_stream.str().c_str());
+    isolate->counters()->runtime_call_stats()->Reset();
+    return *result;
+  } else {
+    DCHECK_LE(args.length(), 2);
+    std::FILE* f;
+    if (args[0]->IsString()) {
+      // With a string argument, the results are appended to that file.
+      CONVERT_ARG_HANDLE_CHECKED(String, arg0, 0);
+      String::FlatContent flat = arg0->GetFlatContent();
+      const char* filename =
+          reinterpret_cast<const char*>(&(flat.ToOneByteVector()[0]));
+      f = std::fopen(filename, "a");
+      DCHECK_NOT_NULL(f);
+    } else {
+      // With an integer argument, the results are written to stdout/stderr.
+      CONVERT_SMI_ARG_CHECKED(fd, 0);
+      DCHECK(fd == 1 || fd == 2);
+      f = fd == 1 ? stdout : stderr;
+    }
+    // The second argument (if any) is a message header to be printed.
+    if (args.length() >= 2) {
+      CONVERT_ARG_HANDLE_CHECKED(String, arg1, 1);
+      arg1->PrintOn(f);
+      std::fputc('\n', f);
+      std::fflush(f);
+    }
+    OFStream stats_stream(f);
+    isolate->counters()->runtime_call_stats()->Print(stats_stream);
+    isolate->counters()->runtime_call_stats()->Reset();
+    if (args[0]->IsString())
+      std::fclose(f);
+    else
+      std::fflush(f);
+    return isolate->heap()->undefined_value();
+  }
+}
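The extended Runtime_GetAndResetRuntimeCallStats keeps the zero-argument form (stats returned as a string) and adds two sinks: a file name, opened in append mode, or a small-integer descriptor where 1 selects stdout and 2 selects stderr. A standalone sketch of that dispatch with the stats table stubbed out (function names here are illustrative, not V8's):

    #include <cstdio>
    #include <string>

    // Resolves the output sink the way the runtime function above does:
    // a name appends to that file, fd 1/2 select the standard streams.
    std::FILE* OpenStatsSink(bool is_name, const std::string& name, int fd) {
      if (is_name) return std::fopen(name.c_str(), "a");
      return fd == 1 ? stdout : stderr;
    }

    void DumpStats(std::FILE* f, bool owned) {
      std::fputs("== runtime call stats ==\n", f);  // Stand-in for the table.
      if (owned) {
        std::fclose(f);  // Only close streams we opened ourselves.
      } else {
        std::fflush(f);
      }
    }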
+
+RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
+  isolate->EnqueueMicrotask(microtask);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 0);
+  isolate->RunMicrotasks();
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, callable, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, Object::OrdinaryHasInstance(isolate, callable, object));
   return *result;
 }
 
diff --git a/src/runtime/runtime-interpreter.cc b/src/runtime/runtime-interpreter.cc
index 22ae911..f870d23 100644
--- a/src/runtime/runtime-interpreter.cc
+++ b/src/runtime/runtime-interpreter.cc
@@ -64,14 +64,11 @@
     os << " ]" << std::endl;
   }
 
-  // Find the location of the register file.
+  // Print the registers.
   JavaScriptFrameIterator frame_iterator(
       bytecode_iterator.bytecode_array()->GetIsolate());
-  JavaScriptFrame* frame = frame_iterator.frame();
-  Address register_file =
-      frame->fp() + InterpreterFrameConstants::kRegisterFilePointerFromFp;
-
-  // Print the registers.
+  InterpretedFrame* frame =
+      reinterpret_cast<InterpretedFrame*>(frame_iterator.frame());
   int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
   for (int operand_index = 0; operand_index < operand_count; operand_index++) {
     interpreter::OperandType operand_type =
@@ -86,8 +83,7 @@
       int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
       for (int reg_index = first_reg.index();
            reg_index < first_reg.index() + range; reg_index++) {
-        Address reg_location = register_file - reg_index * kPointerSize;
-        Object* reg_object = Memory::Object_at(reg_location);
+        Object* reg_object = frame->ReadInterpreterRegister(reg_index);
         os << "      [ " << std::setw(kRegFieldWidth)
            << interpreter::Register(reg_index).ToString(
                   bytecode_iterator.bytecode_array()->parameter_count())
@@ -117,10 +113,10 @@
   AdvanceToOffsetForTracing(bytecode_iterator, offset);
   if (offset == bytecode_iterator.current_offset()) {
     // Print bytecode.
-    const uint8_t* bytecode_address =
-        reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
-    os << " -> " << static_cast<const void*>(bytecode_address)
-       << " (" << bytecode_offset << ") : ";
+    const uint8_t* base_address = bytecode_array->GetFirstBytecodeAddress();
+    const uint8_t* bytecode_address = base_address + offset;
+    os << " -> " << static_cast<const void*>(bytecode_address) << " @ "
+       << std::setw(4) << offset << " : ";
     interpreter::Bytecodes::Decode(os, bytecode_address,
                                    bytecode_array->parameter_count());
     os << std::endl;
diff --git a/src/runtime/runtime-json.cc b/src/runtime/runtime-json.cc
index 07232d5..72fc758 100644
--- a/src/runtime/runtime-json.cc
+++ b/src/runtime/runtime-json.cc
@@ -8,7 +8,6 @@
 #include "src/char-predicates-inl.h"
 #include "src/isolate-inl.h"
 #include "src/json-parser.h"
-#include "src/json-stringifier.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
@@ -20,23 +19,20 @@
   DCHECK(args.length() == 1);
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, BasicJsonStringifier::StringifyString(isolate, string));
+      isolate, result, Runtime::BasicJsonStringifyString(isolate, string));
   return *result;
 }
 
-
 RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  BasicJsonStringifier stringifier(isolate);
   Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     stringifier.Stringify(object));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, Runtime::BasicJsonStringify(isolate, object));
   return *result;
 }
 
-
 RUNTIME_FUNCTION(Runtime_ParseJson) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index f14a7cf..34feeba 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -109,7 +109,7 @@
   return boilerplate;
 }
 
-MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
+static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
     Isolate* isolate, Handle<LiteralsArray> literals,
     Handle<FixedArray> elements) {
   // Create the JSArray.
@@ -191,8 +191,7 @@
     case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
       return CreateObjectLiteralBoilerplate(isolate, literals, elements, false);
     case CompileTimeValue::ARRAY_LITERAL:
-      return Runtime::CreateArrayLiteralBoilerplate(isolate, literals,
-                                                    elements);
+      return CreateArrayLiteralBoilerplate(isolate, literals, elements);
     default:
       UNREACHABLE();
       return MaybeHandle<Object>();
@@ -280,7 +279,7 @@
     Handle<Object> boilerplate;
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, boilerplate,
-        Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements),
+        CreateArrayLiteralBoilerplate(isolate, literals, elements),
         AllocationSite);
 
     AllocationSiteCreationContext creation_context(isolate);
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 5bdb085..8c9c230 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -8,6 +8,7 @@
 #include "src/bootstrapper.h"
 #include "src/debug/debug.h"
 #include "src/isolate-inl.h"
+#include "src/json-stringifier.h"
 #include "src/messages.h"
 #include "src/property-descriptor.h"
 #include "src/runtime/runtime.h"
@@ -119,7 +120,7 @@
                                           LanguageMode language_mode) {
   bool success = false;
   LookupIterator it = LookupIterator::PropertyOrElement(
-      isolate, receiver, key, &success, LookupIterator::HIDDEN);
+      isolate, receiver, key, &success, LookupIterator::OWN);
   if (!success) return Nothing<bool>();
 
   return JSReceiver::DeleteProperty(&it, language_mode);
@@ -168,7 +169,7 @@
     }
 
     // Slow case.
-    LookupIterator::Configuration c = LookupIterator::HIDDEN;
+    LookupIterator::Configuration c = LookupIterator::OWN;
     LookupIterator it = key_is_array_index
                             ? LookupIterator(isolate, js_obj, index, js_obj, c)
                             : LookupIterator(js_obj, key, js_obj, c);
@@ -225,6 +226,15 @@
   return value;
 }
 
+MaybeHandle<Object> Runtime::BasicJsonStringify(Isolate* isolate,
+                                                Handle<Object> object) {
+  return BasicJsonStringifier(isolate).Stringify(object);
+}
+
+MaybeHandle<Object> Runtime::BasicJsonStringifyString(Isolate* isolate,
+                                                      Handle<String> string) {
+  return BasicJsonStringifier::StringifyString(isolate, string);
+}
 
 RUNTIME_FUNCTION(Runtime_GetPrototype) {
   HandleScope scope(isolate);
@@ -260,85 +270,6 @@
   return *obj;
 }
 
-
-// Enumerator used as indices into the array returned from GetOwnProperty
-enum PropertyDescriptorIndices {
-  IS_ACCESSOR_INDEX,
-  VALUE_INDEX,
-  GETTER_INDEX,
-  SETTER_INDEX,
-  WRITABLE_INDEX,
-  ENUMERABLE_INDEX,
-  CONFIGURABLE_INDEX,
-  DESCRIPTOR_SIZE
-};
-
-
-MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
-                                                          Handle<JSObject> obj,
-                                                          Handle<Name> name) {
-  Heap* heap = isolate->heap();
-  Factory* factory = isolate->factory();
-
-  // Get attributes.
-  LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name, obj,
-                                                        LookupIterator::HIDDEN);
-  Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
-
-  if (!maybe.IsJust()) return MaybeHandle<Object>();
-  PropertyAttributes attrs = maybe.FromJust();
-  if (attrs == ABSENT) return factory->undefined_value();
-
-  DCHECK(!isolate->has_pending_exception());
-  Handle<FixedArray> elms = factory->NewFixedArray(DESCRIPTOR_SIZE);
-  elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
-  elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
-
-  bool is_accessor_pair = it.state() == LookupIterator::ACCESSOR &&
-                          it.GetAccessors()->IsAccessorPair();
-  elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(is_accessor_pair));
-
-  if (is_accessor_pair) {
-    Handle<AccessorPair> accessors =
-        Handle<AccessorPair>::cast(it.GetAccessors());
-    Handle<Object> getter =
-        AccessorPair::GetComponent(accessors, ACCESSOR_GETTER);
-    Handle<Object> setter =
-        AccessorPair::GetComponent(accessors, ACCESSOR_SETTER);
-    elms->set(GETTER_INDEX, *getter);
-    elms->set(SETTER_INDEX, *setter);
-  } else {
-    Handle<Object> value;
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::GetProperty(&it),
-                               Object);
-    elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
-    elms->set(VALUE_INDEX, *value);
-  }
-
-  return factory->NewJSArrayWithElements(elms);
-}
-
-
-// Returns an array with the property description:
-//  if args[1] is not a property on args[0]
-//          returns undefined
-//  if args[1] is a data property on args[0]
-//         [false, value, Writeable, Enumerable, Configurable]
-//  if args[1] is an accessor on args[0]
-//         [true, GetFunction, SetFunction, Enumerable, Configurable]
-// TODO(jkummerow): Deprecated. Remove all callers and delete.
-RUNTIME_FUNCTION(Runtime_GetOwnProperty_Legacy) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     GetOwnProperty(isolate, obj, name));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -369,7 +300,7 @@
   Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
   Handle<JSGlobalObject> global_object(script_context->global_object(),
                                        isolate);
-  LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
+  LookupIterator it(global_object, name, global_object, LookupIterator::OWN);
 
   // Switch to fast mode only if there is a data property and it's not on
   // a hidden prototype.
@@ -404,7 +335,7 @@
   Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
   Handle<JSGlobalObject> global_object(script_context->global_object(),
                                        isolate);
-  LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
+  LookupIterator it(global_object, name, global_object, LookupIterator::OWN);
 
   // Switch to fast mode only if there is a data property and it's not on
   // a hidden prototype.
@@ -726,30 +657,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_GlobalProxy) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
-  return function->context()->global_proxy();
-}
-
-
-RUNTIME_FUNCTION(Runtime_LookupAccessor) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-  CONVERT_SMI_ARG_CHECKED(flag, 2);
-  AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
-  if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -823,34 +730,6 @@
 }
 
 
-// Implements part of 8.12.9 DefineOwnProperty.
-// There are 3 cases that lead here:
-// Step 4a - define a new data property.
-// Steps 9b & 12 - replace an existing accessor property with a data property.
-// Step 12 - update an existing data property with a data or generic
-//           descriptor.
-RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-  CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
-
-  LookupIterator it = LookupIterator::PropertyOrElement(
-      isolate, object, name, object, LookupIterator::OWN);
-  if (it.state() == LookupIterator::ACCESS_CHECK && !it.HasAccess()) {
-    return isolate->heap()->undefined_value();
-  }
-
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, JSObject::DefineOwnPropertyIgnoreAttributes(
-                           &it, value, attrs, JSObject::DONT_FORCE_FIELD));
-
-  return *result;
-}
-
 RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 5);
@@ -1104,97 +983,6 @@
   return isolate->heap()->exception();
 }
 
-
-RUNTIME_FUNCTION(Runtime_InstanceOf) {
-  // TODO(4447): Remove this function when ES6 instanceof ships for good.
-  DCHECK(!FLAG_harmony_instanceof);
-
-  // ECMA-262, section 11.8.6, page 54.
-  HandleScope shs(isolate);
-  DCHECK_EQ(2, args.length());
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
-  // {callable} must have a [[Call]] internal method.
-  if (!callable->IsCallable()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate,
-        NewTypeError(MessageTemplate::kInstanceofFunctionExpected, callable));
-  }
-  // If {object} is not a receiver, return false.
-  if (!object->IsJSReceiver()) {
-    return isolate->heap()->false_value();
-  }
-  // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
-  // and use that instead of {callable}.
-  while (callable->IsJSBoundFunction()) {
-    callable =
-        handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
-               isolate);
-  }
-  DCHECK(callable->IsCallable());
-  // Get the "prototype" of {callable}; raise an error if it's not a receiver.
-  Handle<Object> prototype;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, prototype,
-      JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
-                              isolate->factory()->prototype_string()));
-  if (!prototype->IsJSReceiver()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate,
-        NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
-  }
-  // Return whether or not {prototype} is in the prototype chain of {object}.
-  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-  Maybe<bool> result =
-      JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
-  MAYBE_RETURN(result, isolate->heap()->exception());
-  return isolate->heap()->ToBoolean(result.FromJust());
-}
-
-RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
-  // ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
-  HandleScope shs(isolate);
-  DCHECK_EQ(2, args.length());
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
-  // {callable} must have a [[Call]] internal method.
-  if (!callable->IsCallable()) {
-    return isolate->heap()->false_value();
-  }
-  // If {object} is not a receiver, return false.
-  if (!object->IsJSReceiver()) {
-    return isolate->heap()->false_value();
-  }
-  // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
-  // and use that instead of {callable}.
-  while (callable->IsJSBoundFunction()) {
-    callable =
-        handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
-               isolate);
-  }
-  DCHECK(callable->IsCallable());
-  // Get the "prototype" of {callable}; raise an error if it's not a receiver.
-  Handle<Object> prototype;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, prototype,
-      JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
-                              isolate->factory()->prototype_string()));
-  if (!prototype->IsJSReceiver()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate,
-        NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
-  }
-  // Return whether or not {prototype} is in the prototype chain of {object}.
-  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-  Maybe<bool> result =
-      JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
-  MAYBE_RETURN(result, isolate->heap()->exception());
-  return isolate->heap()->ToBoolean(result.FromJust());
-}
-
-
 RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -1229,24 +1017,20 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ObjectDefineProperty) {
+RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, attributes, 2);
-  return JSReceiver::DefineProperty(isolate, o, name, attributes);
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectDefineProperties) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, properties, 1);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, o, JSReceiver::DefineProperties(isolate, o, properties));
-  return *o;
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+  bool success;
+  LookupIterator it = LookupIterator::PropertyOrElement(
+      isolate, o, key, &success, LookupIterator::OWN);
+  if (!success) return isolate->heap()->exception();
+  MAYBE_RETURN(
+      JSReceiver::CreateDataProperty(&it, value, Object::THROW_ON_ERROR),
+      isolate->heap()->exception());
+  return *value;
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-observe.cc b/src/runtime/runtime-observe.cc
deleted file mode 100644
index 0407b8a..0000000
--- a/src/runtime/runtime-observe.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/debug/debug.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_IsObserved) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-
-  if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value();
-  CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
-  DCHECK(!obj->IsJSGlobalProxy() || !obj->map()->is_observed());
-  return isolate->heap()->ToBoolean(obj->map()->is_observed());
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetIsObserved) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
-  RUNTIME_ASSERT(!obj->IsJSGlobalProxy());
-  if (obj->IsJSProxy()) return isolate->heap()->undefined_value();
-  RUNTIME_ASSERT(!obj->map()->is_observed());
-
-  DCHECK(obj->IsJSObject());
-  JSObject::SetObserved(Handle<JSObject>::cast(obj));
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
-  isolate->EnqueueMicrotask(microtask);
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  isolate->RunMicrotasks();
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callback, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, argument, 1);
-  v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
-  // We should send a message on uncaught exception thrown during
-  // Object.observe delivery while not interrupting further delivery, thus
-  // we make a call inside a verbose TryCatch.
-  catcher.SetVerbose(true);
-  Handle<Object> argv[] = {argument};
-
-  // If we are in step-in mode, flood the handler.
-  isolate->debug()->EnableStepIn();
-
-  USE(Execution::Call(isolate, callback, isolate->factory()->undefined_value(),
-                      arraysize(argv), argv));
-  if (isolate->has_pending_exception()) {
-    isolate->ReportPendingMessages();
-    isolate->clear_pending_exception();
-    isolate->set_external_caught_exception(false);
-  }
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObservationState) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  isolate->CountUsage(v8::Isolate::kObjectObserve);
-  return isolate->heap()->observation_state();
-}
-
-
-static bool ContextsHaveSameOrigin(Handle<Context> context1,
-                                   Handle<Context> context2) {
-  return context1->security_token() == context2->security_token();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObserverObjectAndRecordHaveSameOrigin) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, observer, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, record, 2);
-
-  while (observer->IsJSBoundFunction()) {
-    observer = handle(
-        Handle<JSBoundFunction>::cast(observer)->bound_target_function());
-  }
-  if (!observer->IsJSFunction()) return isolate->heap()->false_value();
-
-  Handle<Context> observer_context(
-      Handle<JSFunction>::cast(observer)->context()->native_context());
-  Handle<Context> object_context(object->GetCreationContext());
-  Handle<Context> record_context(record->GetCreationContext());
-
-  return isolate->heap()->ToBoolean(
-      ContextsHaveSameOrigin(object_context, observer_context) &&
-      ContextsHaveSameOrigin(object_context, record_context));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectWasCreatedInCurrentOrigin) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
-  Handle<Context> creation_context(object->GetCreationContext(), isolate);
-  return isolate->heap()->ToBoolean(
-      ContextsHaveSameOrigin(creation_context, isolate->native_context()));
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObjectContextObjectObserve) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
-  Handle<Context> context(object->GetCreationContext(), isolate);
-  return context->native_object_observe();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObjectContextObjectGetNotifier) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
-  Handle<Context> context(object->GetCreationContext(), isolate);
-  return context->native_object_get_notifier();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObjectContextNotifierPerformChange) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object_info, 0);
-
-  Handle<Context> context(object_info->GetCreationContext(), isolate);
-  return context->native_object_notifier_perform_change();
-}
-}  // namespace internal
-}  // namespace v8
diff --git a/src/runtime/runtime-operators.cc b/src/runtime/runtime-operators.cc
index e55ab7c..78dd16f 100644
--- a/src/runtime/runtime-operators.cc
+++ b/src/runtime/runtime-operators.cc
@@ -216,5 +216,16 @@
   return isolate->heap()->ToBoolean(result.FromJust());
 }
 
+RUNTIME_FUNCTION(Runtime_InstanceOf) {
+  HandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, Object::InstanceOf(isolate, object, callable));
+  return *result;
+}
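Runtime_InstanceOf now defers to Object::InstanceOf, the ES6 instanceof semantics that consult Symbol.hasInstance and otherwise fall back to OrdinaryHasInstance; the hand-rolled versions removed from runtime-object.cc above inlined that fallback. At its core the fallback is a prototype-chain membership test, modeled here on toy objects:

    // Toy model of the HasInPrototypeChain walk that OrdinaryHasInstance
    // bottoms out in: follow proto links until the target or null.
    struct Obj {
      const Obj* proto;
    };

    bool HasInPrototypeChain(const Obj* object, const Obj* prototype) {
      for (const Obj* p = object->proto; p != nullptr; p = p->proto) {
        if (p == prototype) return true;
      }
      return false;
    }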
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index de0d66a..68df582 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -44,8 +44,7 @@
   }
 
   // Do the lookup own properties only, see ES5 erratum.
-  LookupIterator it(global, name, global,
-                    LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+  LookupIterator it(global, name, global, LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   if (!maybe.IsJust()) return isolate->heap()->exception();
 
@@ -182,8 +181,7 @@
   Handle<JSGlobalObject> global = isolate->global_object();
 
   // Lookup the property as own on the global object.
-  LookupIterator it(global, name, global,
-                    LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+  LookupIterator it(global, name, global, LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
   DCHECK(maybe.IsJust());
   PropertyAttributes old_attributes = maybe.FromJust();
@@ -237,10 +235,7 @@
     // Check for a conflict with a lexically scoped variable
     context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes,
                         &binding_flags);
-    if (attributes != ABSENT &&
-        (binding_flags == MUTABLE_CHECK_INITIALIZED ||
-         binding_flags == IMMUTABLE_CHECK_INITIALIZED ||
-         binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY)) {
+    if (attributes != ABSENT && binding_flags == BINDING_CHECK_INITIALIZED) {
       return ThrowRedeclarationError(isolate, name);
     }
     attr = static_cast<PropertyAttributes>(attr & ~EVAL_DECLARED);
@@ -339,86 +334,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
-  DCHECK(!value->IsTheHole());
-  // Initializations are always done in a function or native context.
-  CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 1);
-  Handle<Context> context(context_arg->declaration_context());
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
-
-  int index;
-  PropertyAttributes attributes;
-  ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
-  BindingFlags binding_flags;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes, &binding_flags);
-  if (holder.is_null()) {
-    // In case of JSProxy, an exception might have been thrown.
-    if (isolate->has_pending_exception()) return isolate->heap()->exception();
-  }
-
-  if (index != Context::kNotFound) {
-    DCHECK(holder->IsContext());
-    // Property was found in a context.  Perform the assignment if the constant
-    // was uninitialized.
-    Handle<Context> context = Handle<Context>::cast(holder);
-    DCHECK((attributes & READ_ONLY) != 0);
-    if (context->get(index)->IsTheHole()) context->set(index, *value);
-    return *value;
-  }
-
-  PropertyAttributes attr =
-      static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
-
-  // Strict mode handling not needed (legacy const is disallowed in strict
-  // mode).
-
-  // The declared const was configurable, and may have been deleted in the
-  // meanwhile. If so, re-introduce the variable in the context extension.
-  if (attributes == ABSENT) {
-    Handle<Context> declaration_context(context_arg->declaration_context());
-    if (declaration_context->IsScriptContext()) {
-      holder = handle(declaration_context->global_object(), isolate);
-    } else {
-      holder = handle(declaration_context->extension_object(), isolate);
-      DCHECK(!holder.is_null());
-    }
-    CHECK(holder->IsJSObject());
-  } else {
-    // For JSContextExtensionObjects, the initializer can be run multiple times
-    // if in a for loop: for (var i = 0; i < 2; i++) { const x = i; }. Only the
-    // first assignment should go through. For JSGlobalObjects, additionally any
-    // code can run in between that modifies the declared property.
-    DCHECK(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject());
-
-    LookupIterator it(holder, name, Handle<JSReceiver>::cast(holder),
-                      LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
-    Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
-    if (!maybe.IsJust()) return isolate->heap()->exception();
-    PropertyAttributes old_attributes = maybe.FromJust();
-
-    // Ignore if we can't reconfigure the value.
-    if ((old_attributes & DONT_DELETE) != 0) {
-      if ((old_attributes & READ_ONLY) != 0 ||
-          it.state() == LookupIterator::ACCESSOR) {
-        return *value;
-      }
-      attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY);
-    }
-  }
-
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                   Handle<JSObject>::cast(holder), name, value, attr));
-
-  return *value;
-}
-
-
 namespace {
 
 // Find the arguments of the JavaScript function invocation that called
@@ -648,7 +563,7 @@
   {
     DisallowHeapAllocation no_gc;
     FixedArray* elements = FixedArray::cast(result->elements());
-    WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+    WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
     for (int i = 0; i < num_elements; i++) {
       elements->set(i, *arguments[i + start_index], mode);
     }
@@ -663,12 +578,6 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
   Object** parameters = reinterpret_cast<Object**>(args[1]);
   CONVERT_SMI_ARG_CHECKED(argument_count, 2);
-#ifdef DEBUG
-  // This runtime function does not materialize the correct arguments when the
-  // caller has been inlined, better make sure we are not hitting that case.
-  JavaScriptFrameIterator it(isolate);
-  DCHECK(!it.frame()->HasInlinedFrames());
-#endif  // DEBUG
   ParameterArguments argument_getter(parameters);
   return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
 }
@@ -711,7 +620,7 @@
 
     if (IsLexicalVariableMode(mode)) {
       LookupIterator it(global_object, name, global_object,
-                        LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
       Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
       if (!maybe.IsJust()) return isolate->heap()->exception();
       if ((maybe.FromJust() & DONT_DELETE) != 0) {
@@ -875,8 +784,7 @@
         case VAR:
         case LET:
         case CONST:
-        case CONST_LEGACY:
-        case IMPORT: {
+        case CONST_LEGACY: {
           PropertyAttributes attr =
               IsImmutableVariableMode(mode) ? FROZEN : SEALED;
           Handle<AccessorInfo> info =
@@ -961,23 +869,14 @@
     Handle<Object> value = handle(Context::cast(*holder)->get(index), isolate);
     // Check for uninitialized bindings.
     switch (flags) {
-      case MUTABLE_CHECK_INITIALIZED:
-      case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
+      case BINDING_CHECK_INITIALIZED:
         if (value->IsTheHole()) {
           THROW_NEW_ERROR(isolate,
                           NewReferenceError(MessageTemplate::kNotDefined, name),
                           Object);
         }
       // FALLTHROUGH
-      case IMMUTABLE_CHECK_INITIALIZED:
-        if (value->IsTheHole()) {
-          DCHECK(attributes & READ_ONLY);
-          value = isolate->factory()->undefined_value();
-        }
-      // FALLTHROUGH
-      case MUTABLE_IS_INITIALIZED:
-      case IMMUTABLE_IS_INITIALIZED:
-      case IMMUTABLE_IS_INITIALIZED_HARMONY:
+      case BINDING_IS_INITIALIZED:
         DCHECK(!value->IsTheHole());
         if (receiver_return) *receiver_return = receiver;
         return value;
@@ -1075,8 +974,7 @@
 
   // The property was found in a context slot.
   if (index != Context::kNotFound) {
-    if ((flags == MUTABLE_CHECK_INITIALIZED ||
-         flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
+    if (flags == BINDING_CHECK_INITIALIZED &&
         Handle<Context>::cast(holder)->is_the_hole(index)) {
       THROW_NEW_ERROR(isolate,
                       NewReferenceError(MessageTemplate::kNotDefined, name),
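
For reference, the retired MUTABLE_/IMMUTABLE_ flag variants all reduce to the two states used above. A minimal C++ sketch of the consolidated check, with an illustrative enum and exception standing in for the V8 declarations and the kNotDefined error:

#include <stdexcept>

enum BindingFlags { BINDING_IS_INITIALIZED, BINDING_CHECK_INITIALIZED };

// Only bindings that may still be uninitialized (the hole) pay for a
// check before use; initialized bindings load straight through.
int LoadBinding(BindingFlags flags, bool is_the_hole, int value) {
  if (flags == BINDING_CHECK_INITIALIZED && is_the_hole) {
    throw std::runtime_error("ReferenceError: not defined");
  }
  return value;
}
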
diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc
index 6786fa9..0f19bf3 100644
--- a/src/runtime/runtime-strings.cc
+++ b/src/runtime/runtime-strings.cc
@@ -5,10 +5,7 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
-#include "src/conversions-inl.h"
-#include "src/isolate-inl.h"
 #include "src/regexp/jsregexp-inl.h"
-#include "src/regexp/jsregexp.h"
 #include "src/string-builder.h"
 #include "src/string-search.h"
 
@@ -1080,7 +1077,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(args.length(), 1);
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   return ConvertCase(s, isolate, isolate->runtime_state()->to_lower_mapping());
 }
@@ -1088,7 +1085,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(args.length(), 1);
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   return ConvertCase(s, isolate, isolate->runtime_state()->to_upper_mapping());
 }
@@ -1154,6 +1151,7 @@
   return *result;
 }
 
+
 RUNTIME_FUNCTION(Runtime_StringLessThan) {
   HandleScope handle_scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -1273,6 +1271,13 @@
   return __RT_impl_Runtime_StringCharFromCode(Arguments(1, &code), isolate);
 }
 
+RUNTIME_FUNCTION(Runtime_ExternalStringGetChar) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(ExternalString, string, 0);
+  CONVERT_INT32_ARG_CHECKED(index, 1);
+  return Smi::FromInt(string->Get(index));
+}
 
 RUNTIME_FUNCTION(Runtime_OneByteSeqStringGetChar) {
   SealHandleScope shs(isolate);
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc
index a0f0566..cc15d0e 100644
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -8,6 +8,7 @@
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/isolate-inl.h"
 #include "src/snapshot/natives.h"
 
 namespace v8 {
@@ -16,7 +17,16 @@
 RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+  // This function is used by fuzzers to get coverage in the compiler.
+  // Ignore calls on non-function objects to avoid runtime errors.
+  CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+  // If it is not a JSFunction, just return.
+  if (!function_object->IsJSFunction()) {
+    return isolate->heap()->undefined_value();
+  }
+  Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
   if (!function->IsOptimized()) return isolate->heap()->undefined_value();
 
   // TODO(turbofan): Deoptimization is not supported yet.
@@ -84,7 +94,16 @@
 RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
   HandleScope scope(isolate);
   RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+  // This function is used by fuzzers to get coverage for optimizations
+  // in the compiler. Ignore calls on non-function objects to avoid runtime errors.
+  CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+  // If it is not a JSFunction, just return.
+  if (!function_object->IsJSFunction()) {
+    return isolate->heap()->undefined_value();
+  }
+  Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
   // The following assertion was lifted from the DCHECK inside
   // JSFunction::MarkForOptimization().
   RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
@@ -135,6 +154,12 @@
   RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
                  !function->shared()->optimization_disabled());
 
+  // If the function is interpreted, just return. OSR is not supported.
+  // TODO(4764): Remove this check when OSR is enabled in the interpreter.
+  if (function->shared()->HasBytecodeArray()) {
+    return isolate->heap()->undefined_value();
+  }
+
   // If the function is already optimized, just return.
   if (function->IsOptimized()) return isolate->heap()->undefined_value();
 
@@ -153,7 +178,8 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
-  function->shared()->set_disable_optimization_reason(kOptimizationDisabled);
+  function->shared()->set_disable_optimization_reason(
+      kOptimizationDisabledForTest);
   function->shared()->set_optimization_disabled(true);
   return isolate->heap()->undefined_value();
 }
@@ -457,6 +483,31 @@
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
+  HandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, exception_obj, 0);
+
+  Factory* factory = isolate->factory();
+  Handle<JSMessageObject> message_obj =
+      isolate->CreateMessage(exception_obj, nullptr);
+
+  Handle<JSObject> message = factory->NewJSObject(isolate->object_function());
+
+  Handle<String> key;
+  Handle<Object> value;
+
+  key = factory->NewStringFromAsciiChecked("start_pos");
+  value = handle(Smi::FromInt(message_obj->start_position()), isolate);
+  JSObject::SetProperty(message, key, value, STRICT).Assert();
+
+  key = factory->NewStringFromAsciiChecked("end_pos");
+  value = handle(Smi::FromInt(message_obj->end_position()), isolate);
+  JSObject::SetProperty(message, key, value, STRICT).Assert();
+
+  return *message;
+}
+
 RUNTIME_FUNCTION(Runtime_HaveSameMap) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 2);
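
Runtime_GetExceptionDetails above packages the two message positions as properties of a fresh object. A plain-C++ sketch of the returned shape (the std::map stand-in is illustrative; the real function builds a JSObject):

#include <map>
#include <string>

std::map<std::string, int> ExceptionDetails(int start_position,
                                            int end_position) {
  // Mirrors the two STRICT SetProperty calls: start_pos and end_pos.
  return {{"start_pos", start_position}, {"end_pos", end_position}};
}
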
diff --git a/src/runtime/runtime-typedarray.cc b/src/runtime/runtime-typedarray.cc
index bf0ee9f..14b1207 100644
--- a/src/runtime/runtime-typedarray.cc
+++ b/src/runtime/runtime-typedarray.cc
@@ -28,6 +28,14 @@
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
+
+  if (source->was_neutered() || target->was_neutered()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "ArrayBuffer.prototype.slice")));
+  }
+
   RUNTIME_ASSERT(!source.is_identical_to(target));
   size_t start = 0, target_length = 0;
   RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
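
The detach check added above runs before the size conversions, so a detached buffer now throws a TypeError rather than tripping a RUNTIME_ASSERT. A minimal sketch of that ordering, with an illustrative struct in place of JSArrayBuffer:

#include <cstddef>

struct BufferState {
  bool neutered;  // set once the backing store is detached
  size_t byte_length;
};

// Returning false here corresponds to throwing kDetachedOperation.
bool SliceOperandsAttached(const BufferState& source,
                           const BufferState& target) {
  return !source.neutered && !target.neutered;
}
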
@@ -397,7 +405,8 @@
   Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
   return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
                                     obj->type() != kExternalFloat32Array &&
-                                    obj->type() != kExternalFloat64Array);
+                                    obj->type() != kExternalFloat64Array &&
+                                    obj->type() != kExternalUint8ClampedArray);
 }
 
 
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index dc1678b..2c80280 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -47,25 +47,26 @@
   F(GrowArrayElements, 2, 1)         \
   F(HasComplexElements, 1, 1)        \
   F(IsArray, 1, 1)                   \
+  F(ArrayIsArray, 1, 1)              \
   F(HasCachedArrayIndex, 1, 1)       \
   F(GetCachedArrayIndex, 1, 1)       \
   F(FixedArrayGet, 2, 1)             \
   F(FixedArraySet, 3, 1)             \
   F(ArraySpeciesConstructor, 1, 1)
 
-#define FOR_EACH_INTRINSIC_ATOMICS(F) \
-  F(AtomicsCompareExchange, 4, 1)     \
-  F(AtomicsLoad, 2, 1)                \
-  F(AtomicsStore, 3, 1)               \
-  F(AtomicsAdd, 3, 1)                 \
-  F(AtomicsSub, 3, 1)                 \
-  F(AtomicsAnd, 3, 1)                 \
-  F(AtomicsOr, 3, 1)                  \
-  F(AtomicsXor, 3, 1)                 \
-  F(AtomicsExchange, 3, 1)            \
+#define FOR_EACH_INTRINSIC_ATOMICS(F)           \
+  F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
+  F(ThrowNotInt32SharedTypedArrayError, 1, 1)   \
+  F(ThrowInvalidAtomicAccessIndexError, 0, 1)   \
+  F(AtomicsCompareExchange, 4, 1)               \
+  F(AtomicsAdd, 3, 1)                           \
+  F(AtomicsSub, 3, 1)                           \
+  F(AtomicsAnd, 3, 1)                           \
+  F(AtomicsOr, 3, 1)                            \
+  F(AtomicsXor, 3, 1)                           \
+  F(AtomicsExchange, 3, 1)                      \
   F(AtomicsIsLockFree, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_FUTEX(F)  \
   F(AtomicsFutexWait, 4, 1)          \
   F(AtomicsFutexWake, 3, 1)          \
@@ -81,7 +82,6 @@
   F(ThrowIfStaticPrototype, 1, 1)           \
   F(HomeObjectSymbol, 0, 1)                 \
   F(DefineClass, 4, 1)                      \
-  F(FinalizeClassDefinition, 2, 1)          \
   F(LoadFromSuper, 3, 1)                    \
   F(LoadKeyedFromSuper, 3, 1)               \
   F(StoreToSuper_Strict, 4, 1)              \
@@ -117,20 +117,18 @@
   F(WeakCollectionHas, 3, 1)              \
   F(WeakCollectionDelete, 3, 1)           \
   F(WeakCollectionSet, 4, 1)              \
-  F(GetWeakSetValues, 2, 1)               \
-  F(ObservationWeakMapCreate, 0, 1)
-
+  F(GetWeakSetValues, 2, 1)
 
 #define FOR_EACH_INTRINSIC_COMPILER(F)    \
   F(CompileLazy, 1, 1)                    \
+  F(CompileBaseline, 1, 1)                \
   F(CompileOptimized_Concurrent, 1, 1)    \
   F(CompileOptimized_NotConcurrent, 1, 1) \
   F(NotifyStubFailure, 0, 1)              \
   F(NotifyDeoptimized, 1, 1)              \
   F(CompileForOnStackReplacement, 1, 1)   \
   F(TryInstallOptimizedCode, 1, 1)        \
-  F(ResolvePossiblyDirectEval, 5, 1)
-
+  F(ResolvePossiblyDirectEval, 6, 1)
 
 #define FOR_EACH_INTRINSIC_DATE(F) \
   F(IsDate, 1, 1)                  \
@@ -148,9 +146,6 @@
   F(DebugGetProperty, 2, 1)                    \
   F(DebugPropertyTypeFromDetails, 1, 1)        \
   F(DebugPropertyAttributesFromDetails, 1, 1)  \
-  F(DebugPropertyIndexFromDetails, 1, 1)       \
-  F(DebugNamedInterceptorPropertyValue, 2, 1)  \
-  F(DebugIndexedInterceptorElementValue, 2, 1) \
   F(CheckExecutionState, 1, 1)                 \
   F(GetFrameCount, 1, 1)                       \
   F(GetFrameDetails, 2, 1)                     \
@@ -161,8 +156,6 @@
   F(GetFunctionScopeDetails, 2, 1)             \
   F(SetScopeVariableValue, 6, 1)               \
   F(DebugPrintScopes, 0, 1)                    \
-  F(GetThreadCount, 1, 1)                      \
-  F(GetThreadDetails, 2, 1)                    \
   F(SetBreakPointsActive, 1, 1)                \
   F(GetBreakLocations, 2, 1)                   \
   F(SetFunctionBreakPoint, 3, 1)               \
@@ -231,19 +224,19 @@
   F(FunctionToString, 1, 1)
 
 #define FOR_EACH_INTRINSIC_GENERATOR(F) \
-  F(CreateJSGeneratorObject, 0, 1)      \
+  F(CreateJSGeneratorObject, 2, 1)      \
   F(SuspendJSGeneratorObject, 1, 1)     \
-  F(ResumeJSGeneratorObject, 3, 1)      \
   F(GeneratorClose, 1, 1)               \
   F(GeneratorGetFunction, 1, 1)         \
   F(GeneratorGetReceiver, 1, 1)         \
   F(GeneratorGetInput, 1, 1)            \
+  F(GeneratorSetContext, 1, 1)          \
   F(GeneratorGetContinuation, 1, 1)     \
+  F(GeneratorSetContinuation, 2, 1)     \
   F(GeneratorGetSourcePosition, 1, 1)   \
-  F(GeneratorNext, 2, 1)                \
-  F(GeneratorReturn, 2, 1)              \
-  F(GeneratorThrow, 2, 1)
-
+  F(GeneratorGetResumeMode, 1, 1)       \
+  F(GeneratorLoadRegister, 2, 1)        \
+  F(GeneratorStoreRegister, 3, 1)
 
 #ifdef V8_I18N_SUPPORT
 #define FOR_EACH_INTRINSIC_I18N(F)           \
@@ -269,7 +262,10 @@
   F(BreakIteratorFirst, 1, 1)                \
   F(BreakIteratorNext, 1, 1)                 \
   F(BreakIteratorCurrent, 1, 1)              \
-  F(BreakIteratorBreakType, 1, 1)
+  F(BreakIteratorBreakType, 1, 1)            \
+  F(StringToLowerCaseI18N, 1, 1)             \
+  F(StringToUpperCaseI18N, 1, 1)             \
+  F(StringLocaleConvertCase, 3, 1)
 #else
 #define FOR_EACH_INTRINSIC_I18N(F)
 #endif
@@ -289,8 +285,11 @@
   F(NewSyntaxError, 2, 1)                           \
   F(NewReferenceError, 2, 1)                        \
   F(ThrowIllegalInvocation, 0, 1)                   \
+  F(ThrowIncompatibleMethodReceiver, 2, 1)          \
   F(ThrowIteratorResultNotAnObject, 1, 1)           \
+  F(ThrowGeneratorRunning, 0, 1)                    \
   F(ThrowStackOverflow, 0, 1)                       \
+  F(ThrowWasmError, 2, 1)                           \
   F(PromiseRejectEvent, 3, 1)                       \
   F(PromiseRevokeReject, 1, 1)                      \
   F(StackGuard, 0, 1)                               \
@@ -315,10 +314,14 @@
   F(ThrowConstructedNonConstructable, 1, 1)         \
   F(ThrowDerivedConstructorReturnedNonObject, 0, 1) \
   F(ThrowCalledNonCallable, 1, 1)                   \
+  F(ThrowCalledOnNullOrUndefined, 1, 1)             \
   F(CreateListFromArrayLike, 1, 1)                  \
   F(IncrementUseCounter, 1, 1)                      \
-  F(GetOrdinaryHasInstance, 0, 1)                   \
-  F(GetAndResetRuntimeCallStats, 0, 1)
+  F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1)  \
+  F(EnqueueMicrotask, 1, 1)                         \
+  F(RunMicrotasks, 0, 1)                            \
+  F(WasmGetFunctionName, 2, 1)                      \
+  F(OrdinaryHasInstance, 2, 1)
 
 #define FOR_EACH_INTRINSIC_JSON(F) \
   F(QuoteJSONString, 1, 1)         \
@@ -385,7 +388,6 @@
   F(ObjectHasOwnProperty, 2, 1)                      \
   F(InternalSetPrototype, 2, 1)                      \
   F(SetPrototype, 2, 1)                              \
-  F(GetOwnProperty_Legacy, 2, 1)                     \
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
   F(GetProperty, 2, 1)                               \
   F(KeyedGetProperty, 2, 1)                          \
@@ -406,13 +408,10 @@
   F(AllocateHeapNumber, 0, 1)                        \
   F(NewObject, 2, 1)                                 \
   F(FinalizeInstanceSize, 1, 1)                      \
-  F(GlobalProxy, 1, 1)                               \
-  F(LookupAccessor, 3, 1)                            \
   F(LoadMutableDouble, 2, 1)                         \
   F(TryMigrateInstance, 1, 1)                        \
   F(IsJSGlobalProxy, 1, 1)                           \
   F(DefineAccessorPropertyUnchecked, 5, 1)           \
-  F(DefineDataPropertyUnchecked, 4, 1)               \
   F(DefineDataPropertyInLiteral, 5, 1)               \
   F(GetDataProperty, 2, 1)                           \
   F(HasFastPackedElements, 1, 1)                     \
@@ -433,26 +432,10 @@
   F(SameValue, 2, 1)                                 \
   F(SameValueZero, 2, 1)                             \
   F(Compare, 3, 1)                                   \
-  F(InstanceOf, 2, 1)                                \
-  F(OrdinaryHasInstance, 2, 1)                       \
   F(HasInPrototypeChain, 2, 1)                       \
   F(CreateIterResultObject, 2, 1)                    \
   F(IsAccessCheckNeeded, 1, 1)                       \
-  F(ObjectDefineProperties, 2, 1)                    \
-  F(ObjectDefineProperty, 3, 1)
-
-#define FOR_EACH_INTRINSIC_OBSERVE(F)            \
-  F(IsObserved, 1, 1)                            \
-  F(SetIsObserved, 1, 1)                         \
-  F(EnqueueMicrotask, 1, 1)                      \
-  F(RunMicrotasks, 0, 1)                         \
-  F(DeliverObservationChangeRecords, 2, 1)       \
-  F(GetObservationState, 0, 1)                   \
-  F(ObserverObjectAndRecordHaveSameOrigin, 3, 1) \
-  F(ObjectWasCreatedInCurrentOrigin, 1, 1)       \
-  F(GetObjectContextObjectObserve, 1, 1)         \
-  F(GetObjectContextObjectGetNotifier, 1, 1)     \
-  F(GetObjectContextNotifierPerformChange, 1, 1)
+  F(CreateDataProperty, 3, 1)
 
 #define FOR_EACH_INTRINSIC_OPERATORS(F) \
   F(Multiply, 2, 1)                     \
@@ -473,7 +456,8 @@
   F(LessThan, 2, 1)                     \
   F(GreaterThan, 2, 1)                  \
   F(LessThanOrEqual, 2, 1)              \
-  F(GreaterThanOrEqual, 2, 1)
+  F(GreaterThanOrEqual, 2, 1)           \
+  F(InstanceOf, 2, 1)
 
 #define FOR_EACH_INTRINSIC_PROXY(F)     \
   F(IsJSProxy, 1, 1)                    \
@@ -501,7 +485,6 @@
   F(InitializeVarGlobal, 3, 1)             \
   F(InitializeConstGlobal, 2, 1)           \
   F(DeclareLookupSlot, 3, 1)               \
-  F(InitializeLegacyConstLookupSlot, 3, 1) \
   F(NewSloppyArguments_Generic, 1, 1)      \
   F(NewStrictArguments, 1, 1)              \
   F(NewRestParameter, 1, 1)                \
@@ -861,6 +844,7 @@
   F(FlattenString, 1, 1)                  \
   F(StringCharFromCode, 1, 1)             \
   F(StringCharAt, 2, 1)                   \
+  F(ExternalStringGetChar, 2, 1)          \
   F(OneByteSeqStringGetChar, 2, 1)        \
   F(OneByteSeqStringSetChar, 3, 1)        \
   F(TwoByteSeqStringGetChar, 2, 1)        \
@@ -892,6 +876,7 @@
   F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
   F(DebugPrint, 1, 1)                         \
   F(DebugTrace, 0, 1)                         \
+  F(GetExceptionDetails, 1, 1)                \
   F(GlobalPrint, 1, 1)                        \
   F(SystemBreak, 0, 1)                        \
   F(SetFlags, 1, 1)                           \
@@ -1019,7 +1004,6 @@
   FOR_EACH_INTRINSIC_MATHS(F)               \
   FOR_EACH_INTRINSIC_NUMBERS(F)             \
   FOR_EACH_INTRINSIC_OBJECT(F)              \
-  FOR_EACH_INTRINSIC_OBSERVE(F)             \
   FOR_EACH_INTRINSIC_OPERATORS(F)           \
   FOR_EACH_INTRINSIC_PROXY(F)               \
   FOR_EACH_INTRINSIC_REGEXP(F)              \
@@ -1110,6 +1094,12 @@
   MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
       Isolate* isolate, Handle<Object> object, Handle<Object> key);
 
+  MUST_USE_RESULT static MaybeHandle<Object> BasicJsonStringify(
+      Isolate* isolate, Handle<Object> object);
+
+  MUST_USE_RESULT static MaybeHandle<Object> BasicJsonStringifyString(
+      Isolate* isolate, Handle<String> string);
+
   enum TypedArrayId {
     // arrayIds below should be synchronized with typedarray.js natives.
     ARRAY_ID_UINT8 = 1,
@@ -1129,11 +1119,6 @@
                                    ElementsKind* fixed_elements_kind,
                                    size_t* element_size);
 
-  // Used in runtime.cc and hydrogen's VisitArrayLiteral.
-  MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
-      Isolate* isolate, Handle<LiteralsArray> literals,
-      Handle<FixedArray> elements);
-
   static MaybeHandle<JSArray> GetInternalProperties(Isolate* isolate,
                                                     Handle<Object>);
 };
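
Each F(Name, arity, result size) entry in the lists above is an X-macro expanded under a caller-supplied F; an arity of -1 marks a variable-arity intrinsic (as in the /* <= 2 */ annotations). A small self-contained sketch of how such a list is consumed, using a hypothetical two-entry list:

#define FOR_EACH_DEMO_INTRINSIC(F) \
  F(StringCharAt, 2, 1)            \
  F(IsArray, 1, 1)

// Expanding the list under a counting macro yields 0 +1 +1 == 2.
#define COUNT_ENTRY(Name, nargs, ressize) +1
constexpr int kDemoIntrinsicCount = 0 FOR_EACH_DEMO_INTRINSIC(COUNT_ENTRY);
#undef COUNT_ENTRY

static_assert(kDemoIntrinsicCount == 2, "two demo intrinsics");
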
diff --git a/src/s390/assembler-s390-inl.h b/src/s390/assembler-s390-inl.h
index 400d543..b5ace13 100644
--- a/src/s390/assembler-s390-inl.h
+++ b/src/s390/assembler-s390-inl.h
@@ -93,11 +93,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Assembler::target_address_at(pc_, host_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
          rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
@@ -156,19 +151,6 @@
   return code_targets_[index];
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
-                                   icache_flush_mode);
-}
-
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -312,6 +294,7 @@
   }
 }
 
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
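
Making Visit a template, as above, trades virtual dispatch for per-visitor instantiation. A minimal sketch of the pattern (the visitor type and method are illustrative, not the V8 ObjectVisitor interface):

#include <cstdio>

struct PrintingVisitor {
  void VisitEmbeddedObject() { std::printf("embedded object\n"); }
};

template <typename ObjectVisitor>
void Visit(ObjectVisitor* visitor) {
  visitor->VisitEmbeddedObject();  // resolved at compile time per instantiation
}

int main() {
  PrintingVisitor v;
  Visit(&v);
}
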
diff --git a/src/s390/assembler-s390.cc b/src/s390/assembler-s390.cc
index 35ba431..9aa2aab 100644
--- a/src/s390/assembler-s390.cc
+++ b/src/s390/assembler-s390.cc
@@ -217,6 +217,44 @@
 
 bool RelocInfo::IsInConstantPool() { return false; }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return static_cast<uint32_t>(
+      reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_memory_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_memory_reference &&
+           updated_memory_reference < new_base + new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Assembler::set_target_address_at(
+        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
+        icache_flush_mode);
+  } else {
+    UNREACHABLE();
+  }
+}
+
 // -----------------------------------------------------------------------------
 // Implementation of Operand and MemOperand
 // See assembler-s390-inl.h for inlined constructors
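
The update logic above moved out of the inline header and grew a size-reference case rather than disappearing. A self-contained sketch of the arithmetic, with plain integers standing in for the Address type:

#include <cassert>
#include <cstdint>

// A pointer into the old wasm memory keeps its offset in the new memory.
uintptr_t RebaseMemoryReference(uintptr_t ref, uintptr_t old_base,
                                uintptr_t new_base, uint32_t old_size,
                                uint32_t new_size) {
  assert(old_base <= ref && ref < old_base + old_size);
  uintptr_t updated = new_base + (ref - old_base);
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}

// A size reference keeps its distance from the memory limit.
uint32_t RescaleSizeReference(uint32_t ref, uint32_t old_size,
                              uint32_t new_size) {
  assert(ref <= old_size);
  uint32_t updated = new_size + (ref - old_size);  // unsigned wrap mirrors the pointer math
  assert(updated <= new_size);
  return updated;
}
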
diff --git a/src/s390/assembler-s390.h b/src/s390/assembler-s390.h
index 0b9fa38..28cdbb6 100644
--- a/src/s390/assembler-s390.h
+++ b/src/s390/assembler-s390.h
@@ -90,6 +90,8 @@
   V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
   V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
 
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
 #define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
   V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)         \
   V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d15) V(d0)
@@ -221,7 +223,10 @@
   int reg_code;
 };
 
-typedef DoubleRegister DoubleRegister;
+typedef DoubleRegister FloatRegister;
+
+// TODO(john.yan) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
 
 #define DECLARE_REGISTER(R) \
   const DoubleRegister R = {DoubleRegister::kCode_##R};
@@ -265,9 +270,6 @@
 const CRegister cr6 = {6};
 const CRegister cr7 = {7};
 
-// TODO(john.yan) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
 // -----------------------------------------------------------------------------
 // Machine instruction Operands
 
@@ -1241,7 +1243,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   // Writes a single byte or word of data in the code stream.  Used
   // for inline tables, e.g., jump-tables.
diff --git a/src/s390/builtins-s390.cc b/src/s390/builtins-s390.cc
index 12b52c1..8c2283f 100644
--- a/src/s390/builtins-s390.cc
+++ b/src/s390/builtins-s390.cc
@@ -596,15 +596,10 @@
     // r2: number of arguments
     // r3: constructor function
     // r5: new target
-    if (is_api_function) {
-      __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
-      Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(r2);
-      __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+
+    ParameterCount actual(r2);
+    __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -693,6 +688,150 @@
   Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the value to pass to the generator
+  //  -- r3 : the JSGeneratorObject to resume
+  //  -- r4 : the resume mode (tagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(r3);
+
+  // Store input value into generator object.
+  __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
+  __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r2, r5,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
+
+  // Load suspended function and context.
+  __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
+  __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ mov(ip, Operand(step_in_enabled));
+  __ LoadlB(ip, MemOperand(ip));
+  __ CmpP(ip, Operand::Zero());
+  __ beq(&skip_flooding);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r3, r4, r6);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r3, r4);
+    __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Push receiver.
+  __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+  __ Push(ip);
+
+  // ----------- S t a t e -------------
+  //  -- r3    : the JSGeneratorObject to resume
+  //  -- r4    : the resume mode (tagged)
+  //  -- r6    : generator function
+  //  -- cp    : generator context
+  //  -- lr    : return address
+  //  -- sp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadW(
+      r2, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+  {
+    Label loop, done_loop;
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+#if V8_TARGET_ARCH_S390X
+    __ CmpP(r2, Operand::Zero());
+    __ beq(&done_loop);
+#else
+    __ SmiUntag(r2);
+    __ LoadAndTestP(r2, r2);
+    __ beq(&done_loop);
+#endif
+    __ LoadRR(r1, r2);
+    __ bind(&loop);
+    __ push(ip);
+    __ BranchOnCount(r1, &loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+  __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
+  __ bne(&old_generator, Label::kNear);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ LoadRR(r5, r3);
+    __ LoadRR(r3, r6);
+    __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+    __ JumpToJSEntry(ip);
+  }
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushStandardFrame(r6);
+
+    // Restore the operand stack.
+    __ LoadP(r2, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
+    __ LoadP(r5, FieldMemOperand(r2, FixedArray::kLengthOffset));
+    __ AddP(r2, r2,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    {
+      Label loop, done_loop;
+      __ SmiUntag(r5);
+      __ LoadAndTestP(r5, r5);
+      __ beq(&done_loop);
+      __ LoadRR(r1, r5);
+      __ bind(&loop);
+      __ LoadP(ip, MemOperand(r2, kPointerSize));
+      __ la(r2, MemOperand(r2, kPointerSize));
+      __ Push(ip);
+      __ BranchOnCount(r1, &loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(ip, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset),
+              r0);
+
+    // Resume the generator function at the continuation.
+    __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+    __ AddP(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+      __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+      __ SmiUntag(r4);
+      __ AddP(r5, r5, r4);
+      __ LoadSmiLiteral(r4,
+                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+      __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+                r0);
+      __ LoadRR(r2, r3);  // Continuation expects generator object in r2.
+      __ Jump(r5);
+    }
+  }
+}
+
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
   __ push(r3);
@@ -835,14 +974,16 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(r3);
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into kInterpreterBytecodeRegister.
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   Label array_done;
   Register debug_info = r4;
@@ -858,8 +999,13 @@
            FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ bind(&array_done);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ CompareRoot(kInterpreterBytecodeArrayRegister,
+                 Heap::kUndefinedValueRootIndex);
+  __ beq(&bytecode_array_not_present);
+
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ TestIfSmi(kInterpreterBytecodeArrayRegister);
     __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
@@ -867,9 +1013,13 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
-  // Push new.target, bytecode array and zero for bytecode array offset.
-  __ LoadImmP(r2, Operand::Zero());
-  __ Push(r5, kInterpreterBytecodeArrayRegister, r2);
+  // Load the initial bytecode offset.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push new.target, bytecode array and Smi tagged bytecode array offset.
+  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
+  __ Push(r5, kInterpreterBytecodeArrayRegister, r4);
 
   // Allocate the local and temporary register file on the stack.
   {
@@ -901,18 +1051,8 @@
     __ bind(&no_args);
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load accumulator, register file, bytecode offset, dispatch table into
-  // registers.
+  // Load accumulator and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ AddP(kInterpreterRegisterFileRegister, fp,
-          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
@@ -922,32 +1062,33 @@
                            kInterpreterBytecodeOffsetRegister));
   __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
   __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(ip);
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
-}
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
+  // The return value is in r2.
 
-  // The return value is in accumulator, which is already in r2.
+  // Get the arguments + receiver count.
+  __ LoadP(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ LoadlW(r4, FieldMemOperand(r4, BytecodeArray::kParameterSizeOffset));
 
   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::JAVA_SCRIPT);
 
-  // Drop receiver + arguments and return.
-  __ LoadlW(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                BytecodeArray::kParameterSizeOffset));
-  __ AddP(sp, sp, r0);
+  __ lay(sp, MemOperand(sp, r4));
   __ Ret();
+
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
+  __ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(r3, r6, r7);
+  __ JumpToJSEntry(r6);
 }
 
 static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
@@ -1010,24 +1151,24 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register and dispatch table register.
-  __ AddP(kInterpreterRegisterFileRegister, fp,
-          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(r4, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
+                           Code::kHeaderSize - kHeapObjectTag));
+
+  // Initialize the dispatch table register.
   __ mov(kInterpreterDispatchTableRegister,
          Operand(ExternalReference::interpreter_dispatch_table_address(
              masm->isolate())));
 
-  // Get the context from the frame.
-  __ LoadP(kContextRegister,
-           MemOperand(kInterpreterRegisterFileRegister,
-                      InterpreterFrameConstants::kContextFromRegisterPointer));
-
   // Get the bytecode array pointer from the frame.
-  __ LoadP(
-      kInterpreterBytecodeArrayRegister,
-      MemOperand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1040,9 +1181,7 @@
 
   // Get the target bytecode offset from the frame.
   __ LoadP(kInterpreterBytecodeOffsetRegister,
-           MemOperand(
-               kInterpreterRegisterFileRegister,
-               InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
   // Dispatch to the target bytecode.
@@ -1050,59 +1189,157 @@
                            kInterpreterBytecodeOffsetRegister));
   __ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
   __ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
-  __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(ip);
 }
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
-    __ Push(r3);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and and pop the
-  // accumulator value into the accumulator register.
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ mov(r14,
-         Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : argument count (preserved for callee)
+  //  -- r5 : new target (preserved for callee)
+  //  -- r3 : target function (preserved for callee)
+  // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = r3;
+  Register map = r8;
+  Register index = r4;
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&gotta_call_runtime);
+
+  // Find literals.
+  // r9 : native context
+  // r4  : length / index
+  // r8  : optimized code map
+  // r5  : new target
+  // r3  : closure
+  Register native_context = r9;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register temp = r1;
+  Register array_pointer = r7;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ AddP(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ CmpP(temp, native_context);
+  __ bne(&loop_bottom, Label::kNear);
+  // OSR id set to none?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
+  __ bne(&loop_bottom, Label::kNear);
+  // Literals available?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r6,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = r6;
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  // Store code entry in the closure.
+  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r7);
+
+  // Link the closure into the optimized function list.
+  // r6 : code entry
+  // r9: native context
+  // r3 : closure
+  __ LoadP(
+      r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
+            r0);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ StoreP(
+      closure,
+      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+  // Save closure before the write barrier.
+  __ LoadRR(r7, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+
+  // We found neither literals nor code.
+  __ b(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context free optimized code map entry.
+  __ LoadP(entry,
+           FieldMemOperand(map, FixedArray::kHeaderSize +
+                                    SharedFunctionInfo::kSharedCodeIndex));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ b(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  // Is the full code valid?
+  __ LoadP(entry,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(r7);
+  __ CmpP(r7, Operand(Code::BUILTIN));
+  __ beq(&gotta_call_runtime);
+  // Yes, install the full code.
+  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r7);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&gotta_call_runtime);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
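
The fast path above scans the optimized code map from its newest entry downward, matching on the native context and an absent OSR id before touching the literals and code cells. A rough C++ rendering of that search loop (record layout and names are illustrative; the real offsets are the SharedFunctionInfo::kOffsetToPrevious* constants used in the assembly):

struct CodeMapEntry {
  const void* native_context;  // held weakly in the real map
  int osr_ast_id;
  const void* literals;
  const void* code;
};

// Returns the newest matching entry, scanning from the end as above.
const CodeMapEntry* FindCachedCode(const CodeMapEntry* entries, int count,
                                   const void* native_context,
                                   int bailout_id_none) {
  for (int i = count - 1; i >= 0; --i) {
    const CodeMapEntry& e = entries[i];
    if (e.native_context == native_context && e.osr_ast_id == bailout_id_none)
      return &e;  // caller still checks that the weak literals/code are alive
  }
  return nullptr;
}
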
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
+
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
                                  Runtime::kCompileOptimized_NotConcurrent);
@@ -1242,14 +1479,19 @@
   __ SmiUntag(r8);
   // Switch on the state.
   Label with_tos_register, unknown_state;
-  __ CmpP(r8, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ CmpP(
+      r8,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
   __ bne(&with_tos_register);
   __ la(sp, MemOperand(sp, 1 * kPointerSize));  // Remove state.
   __ Ret();
 
   __ bind(&with_tos_register);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r2.code());
   __ LoadP(r2, MemOperand(sp, 1 * kPointerSize));
-  __ CmpP(r8, Operand(FullCodeGenerator::TOS_REG));
+  __ CmpP(
+      r8,
+      Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
   __ bne(&unknown_state);
   __ la(sp, MemOperand(sp, 2 * kPointerSize));  // Remove state.
   __ Ret();
@@ -1457,28 +1699,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r2    : argc
-  //  -- sp[0] : first argument (left-hand side)
-  //  -- sp[4] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ LoadP(InstanceOfDescriptor::LeftRegister(),
-             MemOperand(fp, 2 * kPointerSize));  // Load left-hand side.
-    __ LoadP(InstanceOfDescriptor::RightRegister(),
-             MemOperand(fp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ Ret(2);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : argc
@@ -2416,6 +2636,31 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r3);
+  __ Push(r3);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : requested object size (untagged)
+  //  -- lr : return address
+  // -----------------------------------
+  __ SmiTag(r3);
+  __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ Push(r3, r4);
+  __ LoadSmiLiteral(cp, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2 : actual number of arguments
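
Several sequences above compute a code entry as the code object plus Code::kHeaderSize minus kHeapObjectTag. A plain-integer sketch of that idiom, assuming the conventional tag value of 1:

#include <cstdint>

constexpr uintptr_t kHeapObjectTagValue = 1;  // assumption: tagged heap pointers

// Strip the tag bit and skip the Code header to reach the first instruction.
uintptr_t CodeEntry(uintptr_t tagged_code_object, uintptr_t header_size) {
  return tagged_code_object + header_size - kHeapObjectTagValue;
}
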
diff --git a/src/s390/code-stubs-s390.cc b/src/s390/code-stubs-s390.cc
index 1c7d27b..e1e2003 100644
--- a/src/s390/code-stubs-s390.cc
+++ b/src/s390/code-stubs-s390.cc
@@ -51,11 +51,6 @@
   }
 }
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -66,11 +61,6 @@
   InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1377,125 +1367,6 @@
   __ b(r14);
 }
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = r3;              // Object (lhs).
-  Register const function = r2;            // Function (rhs).
-  Register const object_map = r4;          // Map of {object}.
-  Register const function_map = r5;        // Map of {function}.
-  Register const function_prototype = r6;  // Prototype of {function}.
-  Register const scratch = r7;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ bne(&fast_case);
-  __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-  __ bne(&fast_case);
-  __ LoadRoot(r2, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
-  __ bne(&slow_case);
-  __ LoadRoot(r2, Heap::kFalseValueRootIndex);
-  __ Ret();
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
-  __ bne(&slow_case);
-
-  // Go to the runtime if the function is not a constructor.
-  __ LoadlB(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
-  __ TestBit(scratch, Map::kIsConstructor, r0);
-  __ beq(&slow_case);
-
-  // Ensure that {function} has an instance prototype.
-  __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
-  __ bne(&slow_case);
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ LoadP(function_prototype,
-           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
-  __ bne(&function_prototype_valid);
-  __ LoadP(function_prototype,
-           FieldMemOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Register const object_instance_type = function_map;
-  Register const map_bit_field = function_map;
-  Register const null = scratch;
-  Register const result = r2;
-
-  Label done, loop, fast_runtime_fallback;
-  __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ LoadRoot(null, Heap::kNullValueRootIndex);
-  __ bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ LoadlB(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
-  __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
-  __ bne(&fast_runtime_fallback);
-  // Check if the current object is a Proxy.
-  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
-  __ beq(&fast_runtime_fallback);
-
-  __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
-  __ CmpP(object, function_prototype);
-  __ beq(&done);
-  __ CmpP(object, null);
-  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ bne(&loop);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-  __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
-  __ Ret();
-
-  // Found Proxy or access check needed: Call the runtime
-  __ bind(&fast_runtime_fallback);
-  __ Push(object, function_prototype);
-  // Invalidate the instanceof cache.
-  __ LoadSmiLiteral(scratch, Smi::FromInt(0));
-  __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ Push(object, function);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
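
For reference, the fast path deleted above amounts to a one-entry
(map, function) -> answer cache in front of a prototype-chain walk. Stripped
of the smi, proxy and access checks, the same logic as a minimal C++ sketch,
with plain pointers standing in for tagged values (Obj, InstanceofCache and
friends are illustrative, not V8 types):

  struct Obj {
    Obj* proto;  // what Map::kPrototypeOffset yields for this object's map
  };

  // One-entry cache mirroring the Heap::kInstanceofCache* roots; it is
  // cleared elsewhere whenever any object's prototype changes.
  struct InstanceofCache {
    const void* function = nullptr;
    const void* map = nullptr;
    bool answer = false;
  };

  bool HasInPrototypeChain(Obj* object, const void* object_map,
                           const void* function, Obj* function_prototype,
                           InstanceofCache* cache) {
    if (cache->function == function && cache->map == object_map)
      return cache->answer;  // the LoadRoot(..CacheAnswer..) fast return
    bool result = false;
    for (Obj* p = object->proto; p != nullptr; p = p->proto) {
      if (p == function_prototype) { result = true; break; }
    }
    cache->function = function;
    cache->map = object_map;
    cache->answer = result;
    return result;
  }
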
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
@@ -3898,8 +3769,8 @@
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ bne(&miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, feedback,
                                                receiver_map, scratch1, r9);
@@ -4033,8 +3904,8 @@
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ bne(&miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
       scratch1, scratch2);
@@ -4625,15 +4496,15 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ StoreP(r4, MemOperand(r2, JSObject::kMapOffset));
+  __ StoreP(r4, FieldMemOperand(r2, JSObject::kMapOffset));
   __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r5, MemOperand(r2, JSObject::kPropertiesOffset));
-  __ StoreP(r5, MemOperand(r2, JSObject::kElementsOffset));
+  __ StoreP(r5, FieldMemOperand(r2, JSObject::kPropertiesOffset));
+  __ StoreP(r5, FieldMemOperand(r2, JSObject::kElementsOffset));
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ AddP(r3, r2, Operand(JSObject::kHeaderSize));
+  __ AddP(r3, r2, Operand(JSObject::kHeaderSize - kHeapObjectTag));
 
   // ----------- S t a t e -------------
-  //  -- r2 : result (untagged)
+  //  -- r2 : result (tagged)
   //  -- r3 : result fields (untagged)
   //  -- r7 : result end (untagged)
   //  -- r4 : initial map
@@ -4653,8 +4524,6 @@
     // Initialize all in-object fields with undefined.
     __ InitializeFieldsWithFiller(r3, r7, r8);
 
-    // Add the object tag to make the JSObject real.
-    __ AddP(r2, r2, Operand(kHeapObjectTag));
     __ Ret();
   }
   __ bind(&slack_tracking);
@@ -4674,9 +4543,6 @@
     __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
     __ InitializeFieldsWithFiller(r3, r7, r8);
 
-    // Add the object tag to make the JSObject real.
-    __ AddP(r2, r2, Operand(kHeapObjectTag));
-
     // Check if we can finalize the instance size.
     __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
     __ Ret(ne);
@@ -4702,10 +4568,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(r4);
   }
-  __ SubP(r2, r2, Operand(kHeapObjectTag));
   __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
   __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
   __ AddP(r7, r2, r7);
+  __ SubP(r7, r7, Operand(kHeapObjectTag));
   __ b(&done_allocate);
 
   // Fall back to %NewObject.
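
The retagging hunks above follow from Allocate() now always returning a
tagged pointer (see the macro-assembler changes later in this patch): a
tagged HeapObject pointer is the raw address plus kHeapObjectTag, so field
stores switch to FieldMemOperand and untagged end pointers get an explicit
SubP. The arithmetic, as a sketch (kHeapObjectTag == 1 on all V8 targets):

  #include <cstdint>

  constexpr uintptr_t kHeapObjectTag = 1;

  // FieldMemOperand(obj, offset) == MemOperand(obj, offset - kHeapObjectTag).
  inline uintptr_t FieldAddress(uintptr_t tagged, int offset) {
    return tagged + offset - kHeapObjectTag;
  }

  // r7 above: result end = tagged start + instance size, minus the tag so
  // the field-filling loop sees an untagged limit.
  inline uintptr_t UntaggedEnd(uintptr_t tagged, int instance_size) {
    return tagged + instance_size - kHeapObjectTag;
  }
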
@@ -4723,20 +4589,20 @@
   // -----------------------------------
   __ AssertFunction(r3);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make r4 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ LoadRR(r4, fp);
-    __ b(&loop_entry);
-    __ bind(&loop);
+  // Make r4 point to the JavaScript frame.
+  __ LoadRR(r4, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
     __ CmpP(ip, r3);
-    __ bne(&loop);
+    __ beq(&ok, Label::kNear);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
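
Compared to the deleted loop, the frame to use is now known statically from
skip_stub_frame(): at most one caller-FP hop, with the debug-mode check only
verifying the choice. The same walk at the pointer level (offsets and names
are illustrative stand-ins for the StandardFrameConstants helpers):

  #include <cassert>

  constexpr int kCallerFPOffset = 0;                        // stand-in value
  constexpr int kFunctionOffset = -2 * int(sizeof(void*));  // stand-in value

  char* FindJSFrame(char* fp, bool skip_stub_frame, void* function) {
    char* frame = fp;
    if (skip_stub_frame) {
      // Ignition runs the stub under a handler/stub frame; hop over it once.
      frame = *reinterpret_cast<char**>(frame + kCallerFPOffset);
    }
    // The FLAG_debug_code block above is this assert, with Abort on failure.
    assert(*reinterpret_cast<void**>(frame + kFunctionOffset) == function);
    return frame;
  }
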
 
   // Check if we have rest parameters (only possible if we have an
@@ -4770,7 +4636,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the rest parameter array in r2.
@@ -4813,7 +4679,7 @@
     Label allocate, done_allocate;
     __ mov(r3, Operand(JSArray::kSize + FixedArray::kHeaderSize));
     __ AddP(r3, r3, r8);
-    __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+    __ Allocate(r3, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in r5.
@@ -4870,6 +4736,22 @@
   // -----------------------------------
   __ AssertFunction(r3);
 
+  // Make r9 point to the JavaScript frame.
+  __ LoadRR(r9, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ LoadP(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ LoadP(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
+    __ CmpP(ip, r3);
+    __ beq(&ok, Label::kNear);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadW(
@@ -4878,19 +4760,20 @@
   __ SmiTag(r4);
 #endif
   __ SmiToPtrArrayOffset(r5, r4);
-  __ AddP(r5, fp, r5);
+  __ AddP(r5, r9, r5);
   __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
 
   // r3 : function
   // r4 : number of parameters (tagged)
   // r5 : parameters pointer
+  // r9 : JavaScript frame pointer
   // Registers used over whole function:
   // r7 : arguments count (tagged)
   // r8 : mapped parameter count (tagged)
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
   __ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   __ beq(&adaptor_frame);
@@ -4943,7 +4826,7 @@
   __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(r1, r2, r1, r6, &runtime, TAG_OBJECT);
+  __ Allocate(r1, r2, r1, r6, &runtime, NO_ALLOCATION_FLAGS);
 
   // r2 = address of new object(s) (tagged)
   // r4 = argument count (smi-tagged)
@@ -5107,20 +4990,20 @@
   // -----------------------------------
   __ AssertFunction(r3);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make r4 point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ LoadRR(r4, fp);
-    __ b(&loop_entry);
-    __ bind(&loop);
+  // Make r4 point to the JavaScript frame.
+  __ LoadRR(r4, fp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
     __ CmpP(ip, r3);
-    __ bne(&loop);
+    __ beq(&ok, Label::kNear);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -5161,7 +5044,7 @@
   Label allocate, done_allocate;
   __ mov(r3, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
   __ AddP(r3, r3, r8);
-  __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+  __ Allocate(r3, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in r5.
@@ -5533,7 +5416,11 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
+
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
 
   // context save
   __ push(context);
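
With kNewTargetIndex the FunctionCallbackArguments (FCA) frame grows to eight
slots, and the stub pushes them from index 7 (new target, seeded with
undefined) downward. The layout as an enum for orientation — indices 0-2, 7
and the length restate the STATIC_ASSERTs above; the names for 3-6 follow the
era's api-arguments.h and are not asserted in this hunk:

  enum FunctionCallbackArgumentsSlot {
    kHolderIndex = 0,
    kIsolateIndex = 1,
    kReturnValueDefaultValueIndex = 2,
    kReturnValueOffset = 3,
    kDataIndex = 4,
    kCalleeIndex = 5,
    kContextSaveIndex = 6,
    kNewTargetIndex = 7,  // new in this patch
    kArgsLength = 8,
  };
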
@@ -5569,10 +5456,10 @@
   // it's not controlled by GC.
   // S390 LINUX ABI:
   //
-  // Create 5 extra slots on stack:
+  // Create 4 extra slots on stack:
   //    [0] space for DirectCEntryStub's LR save
-  //    [1-4] FunctionCallbackInfo
-  const int kApiStackSpace = 5;
+  //    [1-3] FunctionCallbackInfo
+  const int kApiStackSpace = 4;
   const int kFunctionCallbackInfoOffset =
       (kStackFrameExtraParamSlot + 1) * kPointerSize;
 
@@ -5591,9 +5478,6 @@
   // FunctionCallbackInfo::length_ = argc
   __ LoadImmP(ip, Operand(argc()));
   __ StoreW(ip, MemOperand(r2, 2 * kPointerSize));
-  // FunctionCallbackInfo::is_construct_call_ = 0
-  __ LoadImmP(ip, Operand::Zero());
-  __ StoreW(ip, MemOperand(r2, 2 * kPointerSize + kIntSize));
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_function_callback(masm->isolate());
@@ -5610,9 +5494,9 @@
   }
   MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  MemOperand is_construct_call_operand =
-      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
-  MemOperand* stack_space_operand = &is_construct_call_operand;
+  MemOperand length_operand =
+      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
+  MemOperand* stack_space_operand = &length_operand;
   stack_space = argc() + FCA::kArgsLength + 1;
   stack_space_operand = NULL;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@@ -5621,18 +5505,39 @@
 }
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- sp[0]                        : name
-  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- r4                           : api_function_address
-  // -----------------------------------
-
-  Register api_function_address = ApiGetterDescriptor::function_address();
   int arg0Slot = 0;
   int accessorInfoSlot = 0;
   int apiStackSpace = 0;
-  DCHECK(api_function_address.is(r4));
+  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame to make the GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = r6;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+  Register api_function_address = r4;
+
+  __ push(receiver);
+  // Push data from AccessorInfo.
+  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ push(scratch);
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Push(scratch, scratch);
+  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch, holder);
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ push(scratch);
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5680,6 +5585,10 @@
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+  __ LoadP(api_function_address,
+           FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
   // +3 is to skip prolog, return address and name handle.
   MemOperand return_value_operand(
       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
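
The getter stub now materializes v8::PropertyCallbackInfo::args_ itself: read
against the STATIC_ASSERTs, the push sequence above (receiver first, name
last) produces the following stack picture, lowest address first — a sketch,
with void* standing in for tagged values:

  struct PropertyCallbackStack {
    void* name;                   // extra handle just below args_
    void* should_throw_on_error;  // args_[0], Smi 0 == false
    void* holder;                 // args_[1]
    void* isolate;                // args_[2]
    void* return_value_default;   // args_[3], undefined
    void* return_value;           // args_[4], undefined
    void* data;                   // args_[5], AccessorInfo::kDataOffset
    void* receiver;               // args_[6], "this"
  };
  static_assert(sizeof(PropertyCallbackStack) == 8 * sizeof(void*),
                "kArgsLength (7) slots plus the name handle");
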
diff --git a/src/s390/codegen-s390.cc b/src/s390/codegen-s390.cc
index 6636a7c..fe94c94 100644
--- a/src/s390/codegen-s390.cc
+++ b/src/s390/codegen-s390.cc
@@ -172,7 +172,7 @@
   __ SmiToDoubleArrayOffset(r14, length);
   __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
   __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
-
+  __ SubP(array, array, Operand(kHeapObjectTag));
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
   __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
@@ -302,12 +302,12 @@
   __ AddP(array_size, r0);
   __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
               NO_ALLOCATION_FLAGS);
-  // array: destination FixedArray, not tagged as heap object
+  // array: destination FixedArray, tagged as heap object
   // Set destination FixedArray's length and map.
   __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
-  __ AddP(array, Operand(kHeapObjectTag));
+  __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
+            r0);
+  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
 
   // Prepare for conversion loop.
   Register src_elements = elements;
diff --git a/src/s390/constants-s390.cc b/src/s390/constants-s390.cc
index a958082..da53613 100644
--- a/src/s390/constants-s390.cc
+++ b/src/s390/constants-s390.cc
@@ -9,6 +9,267 @@
 namespace v8 {
 namespace internal {
 
+Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
+    // Based on Figure B-3 in z/Architecture Principles of
+    // Operation.
+    TWO_BYTE_OPCODE,           // 0x00
+    TWO_BYTE_OPCODE,           // 0x01
+    TWO_BYTE_DISJOINT_OPCODE,  // 0x02
+    TWO_BYTE_DISJOINT_OPCODE,  // 0x03
+    ONE_BYTE_OPCODE,           // 0x04
+    ONE_BYTE_OPCODE,           // 0x05
+    ONE_BYTE_OPCODE,           // 0x06
+    ONE_BYTE_OPCODE,           // 0x07
+    ONE_BYTE_OPCODE,           // 0x08
+    ONE_BYTE_OPCODE,           // 0x09
+    ONE_BYTE_OPCODE,           // 0x0a
+    ONE_BYTE_OPCODE,           // 0x0b
+    ONE_BYTE_OPCODE,           // 0x0c
+    ONE_BYTE_OPCODE,           // 0x0d
+    ONE_BYTE_OPCODE,           // 0x0e
+    ONE_BYTE_OPCODE,           // 0x0f
+    ONE_BYTE_OPCODE,           // 0x10
+    ONE_BYTE_OPCODE,           // 0x11
+    ONE_BYTE_OPCODE,           // 0x12
+    ONE_BYTE_OPCODE,           // 0x13
+    ONE_BYTE_OPCODE,           // 0x14
+    ONE_BYTE_OPCODE,           // 0x15
+    ONE_BYTE_OPCODE,           // 0x16
+    ONE_BYTE_OPCODE,           // 0x17
+    ONE_BYTE_OPCODE,           // 0x18
+    ONE_BYTE_OPCODE,           // 0x19
+    ONE_BYTE_OPCODE,           // 0x1a
+    ONE_BYTE_OPCODE,           // 0x1b
+    ONE_BYTE_OPCODE,           // 0x1c
+    ONE_BYTE_OPCODE,           // 0x1d
+    ONE_BYTE_OPCODE,           // 0x1e
+    ONE_BYTE_OPCODE,           // 0x1f
+    ONE_BYTE_OPCODE,           // 0x20
+    ONE_BYTE_OPCODE,           // 0x21
+    ONE_BYTE_OPCODE,           // 0x22
+    ONE_BYTE_OPCODE,           // 0x23
+    ONE_BYTE_OPCODE,           // 0x24
+    ONE_BYTE_OPCODE,           // 0x25
+    ONE_BYTE_OPCODE,           // 0x26
+    ONE_BYTE_OPCODE,           // 0x27
+    ONE_BYTE_OPCODE,           // 0x28
+    ONE_BYTE_OPCODE,           // 0x29
+    ONE_BYTE_OPCODE,           // 0x2a
+    ONE_BYTE_OPCODE,           // 0x2b
+    ONE_BYTE_OPCODE,           // 0x2c
+    ONE_BYTE_OPCODE,           // 0x2d
+    ONE_BYTE_OPCODE,           // 0x2e
+    ONE_BYTE_OPCODE,           // 0x2f
+    ONE_BYTE_OPCODE,           // 0x30
+    ONE_BYTE_OPCODE,           // 0x31
+    ONE_BYTE_OPCODE,           // 0x32
+    ONE_BYTE_OPCODE,           // 0x33
+    ONE_BYTE_OPCODE,           // 0x34
+    ONE_BYTE_OPCODE,           // 0x35
+    ONE_BYTE_OPCODE,           // 0x36
+    ONE_BYTE_OPCODE,           // 0x37
+    ONE_BYTE_OPCODE,           // 0x38
+    ONE_BYTE_OPCODE,           // 0x39
+    ONE_BYTE_OPCODE,           // 0x3a
+    ONE_BYTE_OPCODE,           // 0x3b
+    ONE_BYTE_OPCODE,           // 0x3c
+    ONE_BYTE_OPCODE,           // 0x3d
+    ONE_BYTE_OPCODE,           // 0x3e
+    ONE_BYTE_OPCODE,           // 0x3f
+    ONE_BYTE_OPCODE,           // 0x40
+    ONE_BYTE_OPCODE,           // 0x41
+    ONE_BYTE_OPCODE,           // 0x42
+    ONE_BYTE_OPCODE,           // 0x43
+    ONE_BYTE_OPCODE,           // 0x44
+    ONE_BYTE_OPCODE,           // 0x45
+    ONE_BYTE_OPCODE,           // 0x46
+    ONE_BYTE_OPCODE,           // 0x47
+    ONE_BYTE_OPCODE,           // 0x48
+    ONE_BYTE_OPCODE,           // 0x49
+    ONE_BYTE_OPCODE,           // 0x4a
+    ONE_BYTE_OPCODE,           // 0x4b
+    ONE_BYTE_OPCODE,           // 0x4c
+    ONE_BYTE_OPCODE,           // 0x4d
+    ONE_BYTE_OPCODE,           // 0x4e
+    ONE_BYTE_OPCODE,           // 0x4f
+    ONE_BYTE_OPCODE,           // 0x50
+    ONE_BYTE_OPCODE,           // 0x51
+    ONE_BYTE_OPCODE,           // 0x52
+    ONE_BYTE_OPCODE,           // 0x53
+    ONE_BYTE_OPCODE,           // 0x54
+    ONE_BYTE_OPCODE,           // 0x55
+    ONE_BYTE_OPCODE,           // 0x56
+    ONE_BYTE_OPCODE,           // 0x57
+    ONE_BYTE_OPCODE,           // 0x58
+    ONE_BYTE_OPCODE,           // 0x59
+    ONE_BYTE_OPCODE,           // 0x5a
+    ONE_BYTE_OPCODE,           // 0x5b
+    ONE_BYTE_OPCODE,           // 0x5c
+    ONE_BYTE_OPCODE,           // 0x5d
+    ONE_BYTE_OPCODE,           // 0x5e
+    ONE_BYTE_OPCODE,           // 0x5f
+    ONE_BYTE_OPCODE,           // 0x60
+    ONE_BYTE_OPCODE,           // 0x61
+    ONE_BYTE_OPCODE,           // 0x62
+    ONE_BYTE_OPCODE,           // 0x63
+    ONE_BYTE_OPCODE,           // 0x64
+    ONE_BYTE_OPCODE,           // 0x65
+    ONE_BYTE_OPCODE,           // 0x66
+    ONE_BYTE_OPCODE,           // 0x67
+    ONE_BYTE_OPCODE,           // 0x68
+    ONE_BYTE_OPCODE,           // 0x69
+    ONE_BYTE_OPCODE,           // 0x6a
+    ONE_BYTE_OPCODE,           // 0x6b
+    ONE_BYTE_OPCODE,           // 0x6c
+    ONE_BYTE_OPCODE,           // 0x6d
+    ONE_BYTE_OPCODE,           // 0x6e
+    ONE_BYTE_OPCODE,           // 0x6f
+    ONE_BYTE_OPCODE,           // 0x70
+    ONE_BYTE_OPCODE,           // 0x71
+    ONE_BYTE_OPCODE,           // 0x72
+    ONE_BYTE_OPCODE,           // 0x73
+    ONE_BYTE_OPCODE,           // 0x74
+    ONE_BYTE_OPCODE,           // 0x75
+    ONE_BYTE_OPCODE,           // 0x76
+    ONE_BYTE_OPCODE,           // 0x77
+    ONE_BYTE_OPCODE,           // 0x78
+    ONE_BYTE_OPCODE,           // 0x79
+    ONE_BYTE_OPCODE,           // 0x7a
+    ONE_BYTE_OPCODE,           // 0x7b
+    ONE_BYTE_OPCODE,           // 0x7c
+    ONE_BYTE_OPCODE,           // 0x7d
+    ONE_BYTE_OPCODE,           // 0x7e
+    ONE_BYTE_OPCODE,           // 0x7f
+    ONE_BYTE_OPCODE,           // 0x80
+    ONE_BYTE_OPCODE,           // 0x81
+    ONE_BYTE_OPCODE,           // 0x82
+    ONE_BYTE_OPCODE,           // 0x83
+    ONE_BYTE_OPCODE,           // 0x84
+    ONE_BYTE_OPCODE,           // 0x85
+    ONE_BYTE_OPCODE,           // 0x86
+    ONE_BYTE_OPCODE,           // 0x87
+    ONE_BYTE_OPCODE,           // 0x88
+    ONE_BYTE_OPCODE,           // 0x89
+    ONE_BYTE_OPCODE,           // 0x8a
+    ONE_BYTE_OPCODE,           // 0x8b
+    ONE_BYTE_OPCODE,           // 0x8c
+    ONE_BYTE_OPCODE,           // 0x8d
+    ONE_BYTE_OPCODE,           // 0x8e
+    ONE_BYTE_OPCODE,           // 0x8f
+    ONE_BYTE_OPCODE,           // 0x90
+    ONE_BYTE_OPCODE,           // 0x91
+    ONE_BYTE_OPCODE,           // 0x92
+    ONE_BYTE_OPCODE,           // 0x93
+    ONE_BYTE_OPCODE,           // 0x94
+    ONE_BYTE_OPCODE,           // 0x95
+    ONE_BYTE_OPCODE,           // 0x96
+    ONE_BYTE_OPCODE,           // 0x97
+    ONE_BYTE_OPCODE,           // 0x98
+    ONE_BYTE_OPCODE,           // 0x99
+    ONE_BYTE_OPCODE,           // 0x9a
+    ONE_BYTE_OPCODE,           // 0x9b
+    TWO_BYTE_DISJOINT_OPCODE,  // 0x9c
+    TWO_BYTE_DISJOINT_OPCODE,  // 0x9d
+    TWO_BYTE_DISJOINT_OPCODE,  // 0x9e
+    TWO_BYTE_DISJOINT_OPCODE,  // 0x9f
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xa0
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xa1
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xa2
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xa3
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xa4
+    THREE_NIBBLE_OPCODE,       // 0xa5
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xa6
+    THREE_NIBBLE_OPCODE,       // 0xa7
+    ONE_BYTE_OPCODE,           // 0xa8
+    ONE_BYTE_OPCODE,           // 0xa9
+    ONE_BYTE_OPCODE,           // 0xaa
+    ONE_BYTE_OPCODE,           // 0xab
+    ONE_BYTE_OPCODE,           // 0xac
+    ONE_BYTE_OPCODE,           // 0xad
+    ONE_BYTE_OPCODE,           // 0xae
+    ONE_BYTE_OPCODE,           // 0xaf
+    ONE_BYTE_OPCODE,           // 0xb0
+    ONE_BYTE_OPCODE,           // 0xb1
+    TWO_BYTE_OPCODE,           // 0xb2
+    TWO_BYTE_OPCODE,           // 0xb3
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xb4
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xb5
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xb6
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xb7
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xb8
+    TWO_BYTE_OPCODE,           // 0xb9
+    ONE_BYTE_OPCODE,           // 0xba
+    ONE_BYTE_OPCODE,           // 0xbb
+    ONE_BYTE_OPCODE,           // 0xbc
+    ONE_BYTE_OPCODE,           // 0xbd
+    ONE_BYTE_OPCODE,           // 0xbe
+    ONE_BYTE_OPCODE,           // 0xbf
+    THREE_NIBBLE_OPCODE,       // 0xc0
+    THREE_NIBBLE_OPCODE,       // 0xc1
+    THREE_NIBBLE_OPCODE,       // 0xc2
+    THREE_NIBBLE_OPCODE,       // 0xc3
+    THREE_NIBBLE_OPCODE,       // 0xc4
+    THREE_NIBBLE_OPCODE,       // 0xc5
+    THREE_NIBBLE_OPCODE,       // 0xc6
+    ONE_BYTE_OPCODE,           // 0xc7
+    THREE_NIBBLE_OPCODE,       // 0xc8
+    THREE_NIBBLE_OPCODE,       // 0xc9
+    THREE_NIBBLE_OPCODE,       // 0xca
+    THREE_NIBBLE_OPCODE,       // 0xcb
+    THREE_NIBBLE_OPCODE,       // 0xcc
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xcd
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xce
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xcf
+    ONE_BYTE_OPCODE,           // 0xd0
+    ONE_BYTE_OPCODE,           // 0xd1
+    ONE_BYTE_OPCODE,           // 0xd2
+    ONE_BYTE_OPCODE,           // 0xd3
+    ONE_BYTE_OPCODE,           // 0xd4
+    ONE_BYTE_OPCODE,           // 0xd5
+    ONE_BYTE_OPCODE,           // 0xd6
+    ONE_BYTE_OPCODE,           // 0xd7
+    ONE_BYTE_OPCODE,           // 0xd8
+    ONE_BYTE_OPCODE,           // 0xd9
+    ONE_BYTE_OPCODE,           // 0xda
+    ONE_BYTE_OPCODE,           // 0xdb
+    ONE_BYTE_OPCODE,           // 0xdc
+    ONE_BYTE_OPCODE,           // 0xdd
+    ONE_BYTE_OPCODE,           // 0xde
+    ONE_BYTE_OPCODE,           // 0xdf
+    ONE_BYTE_OPCODE,           // 0xe0
+    ONE_BYTE_OPCODE,           // 0xe1
+    ONE_BYTE_OPCODE,           // 0xe2
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xe3
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xe4
+    TWO_BYTE_OPCODE,           // 0xe5
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xe6
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xe7
+    ONE_BYTE_OPCODE,           // 0xe8
+    ONE_BYTE_OPCODE,           // 0xe9
+    ONE_BYTE_OPCODE,           // 0xea
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xeb
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xec
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xed
+    ONE_BYTE_OPCODE,           // 0xee
+    ONE_BYTE_OPCODE,           // 0xef
+    ONE_BYTE_OPCODE,           // 0xf0
+    ONE_BYTE_OPCODE,           // 0xf1
+    ONE_BYTE_OPCODE,           // 0xf2
+    ONE_BYTE_OPCODE,           // 0xf3
+    ONE_BYTE_OPCODE,           // 0xf4
+    ONE_BYTE_OPCODE,           // 0xf5
+    ONE_BYTE_OPCODE,           // 0xf6
+    ONE_BYTE_OPCODE,           // 0xf7
+    ONE_BYTE_OPCODE,           // 0xf8
+    ONE_BYTE_OPCODE,           // 0xf9
+    ONE_BYTE_OPCODE,           // 0xfa
+    ONE_BYTE_OPCODE,           // 0xfb
+    ONE_BYTE_OPCODE,           // 0xfc
+    ONE_BYTE_OPCODE,           // 0xfd
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xfe
+    TWO_BYTE_DISJOINT_OPCODE,  // 0xff
+};
+
 // These register names are defined in a way to match the native disassembler
 // formatting. See for example the command "objdump -d <binary file>".
 const char* Registers::names_[kNumRegisters] = {
diff --git a/src/s390/constants-s390.h b/src/s390/constants-s390.h
index c313c92..9dfb32c 100644
--- a/src/s390/constants-s390.h
+++ b/src/s390/constants-s390.h
@@ -1080,6 +1080,7 @@
     THREE_NIBBLE_OPCODE        // Three Nibbles - Bits 0 to 7, 12 to 15
   };
 
+  static OpcodeFormatType OpcodeFormatTable[256];
 // Helper macro to define static accessors.
 // We use the cast to char* trick to bypass the strict anti-aliasing rules.
 #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
@@ -1254,37 +1255,7 @@
   // Get Instruction Format Type
   static OpcodeFormatType getOpcodeFormatType(const byte* instr) {
     const byte firstByte = *instr;
-    // Based on Figure B-3 in z/Architecture Principles of
-    // Operation.
-
-    // 1-byte opcodes
-    //   I, RR, RS, RSI, RX, SS Formats
-    if ((0x04 <= firstByte && 0x9B >= firstByte) ||
-        (0xA8 <= firstByte && 0xB1 >= firstByte) ||
-        (0xBA <= firstByte && 0xBF >= firstByte) || (0xC5 == firstByte) ||
-        (0xC7 == firstByte) || (0xD0 <= firstByte && 0xE2 >= firstByte) ||
-        (0xE8 <= firstByte && 0xEA >= firstByte) ||
-        (0xEE <= firstByte && 0xFD >= firstByte)) {
-      return ONE_BYTE_OPCODE;
-    }
-
-    // 2-byte opcodes
-    //   E, IE, RRD, RRE, RRF, SIL, S, SSE Formats
-    if ((0x00 == firstByte) ||  // Software breakpoint 0x0001
-        (0x01 == firstByte) || (0xB2 == firstByte) || (0xB3 == firstByte) ||
-        (0xB9 == firstByte) || (0xE5 == firstByte)) {
-      return TWO_BYTE_OPCODE;
-    }
-
-    // 3-nibble opcodes
-    //   RI, RIL, SSF Formats
-    if ((0xA5 == firstByte) || (0xA7 == firstByte) ||
-        (0xC0 <= firstByte && 0xCC >= firstByte)) {  // C5,C7 handled above
-      return THREE_NIBBLE_OPCODE;
-    }
-    // Remaining ones are all TWO_BYTE_DISJOINT OPCODES.
-    DCHECK(InstructionLength(instr) == 6);
-    return TWO_BYTE_DISJOINT_OPCODE;
+    return OpcodeFormatTable[firstByte];
   }
 
   // Extract the full opcode from the instruction.
@@ -1304,11 +1275,10 @@
       case TWO_BYTE_DISJOINT_OPCODE:
         // Two Bytes - Bits 0 to 7, 40 to 47
         return static_cast<Opcode>((*instr << 8) | (*(instr + 5) & 0xFF));
-      case THREE_NIBBLE_OPCODE:
+      default:
+        // case THREE_NIBBLE_OPCODE:
         // Three Nibbles - Bits 0 to 7, 12 to 15
         return static_cast<Opcode>((*instr << 4) | (*(instr + 1) & 0xF));
-      default:
-        break;
     }
 
     UNREACHABLE();
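
Replacing the chained range tests with the 256-entry OpcodeFormatTable turns
format classification into a single indexed load and keeps the Figure B-3
data in one auditable place. The technique, reduced to a toy (only the first
few entries are spelled out; putting ONE_BYTE first in the enum makes the
zero-initialized remainder default to it):

  #include <cstdint>

  enum Format : uint8_t { ONE_BYTE, TWO_BYTE, TWO_BYTE_DISJOINT, THREE_NIBBLE };

  static const Format kFormatTable[256] = {
      /* 0x00 */ TWO_BYTE, TWO_BYTE, TWO_BYTE_DISJOINT, TWO_BYTE_DISJOINT,
      /* 0x04 */ ONE_BYTE,  // ... remaining entries elided ...
  };

  inline Format GetFormat(const uint8_t* instr) {
    return kFormatTable[*instr];  // O(1), branch-free
  }
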
diff --git a/src/s390/interface-descriptors-s390.cc b/src/s390/interface-descriptors-s390.cc
index 63afca8..aae1949 100644
--- a/src/s390/interface-descriptors-s390.cc
+++ b/src/s390/interface-descriptors-s390.cc
@@ -36,13 +36,11 @@
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
 
-const Register InstanceOfDescriptor::LeftRegister() { return r3; }
-const Register InstanceOfDescriptor::RightRegister() { return r2; }
-
 const Register StringCompareDescriptor::LeftRegister() { return r3; }
 const Register StringCompareDescriptor::RightRegister() { return r2; }
 
-const Register ApiGetterDescriptor::function_address() { return r4; }
+const Register ApiGetterDescriptor::HolderRegister() { return r2; }
+const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
 
 const Register MathPowTaggedDescriptor::exponent() { return r4; }
 
@@ -53,6 +51,9 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return r2; }
+const Register HasPropertyDescriptor::KeyRegister() { return r5; }
+
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r4};
@@ -211,12 +212,17 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r2};
+  // register state
+  // r2 -- number of arguments
+  // r3 -- function
+  // r4 -- allocation site with elements kind
+  Register registers[] = {r3, r4, r2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -275,6 +281,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r3, r2};
@@ -330,9 +342,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
-      kInterpreterDispatchTableRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -367,6 +378,16 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r2,  // the value to pass to the generator
+      r3,  // the JSGeneratorObject to resume
+      r4   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/s390/macro-assembler-s390.cc b/src/s390/macro-assembler-s390.cc
index 21058f4..9257e64 100644
--- a/src/s390/macro-assembler-s390.cc
+++ b/src/s390/macro-assembler-s390.cc
@@ -480,8 +480,8 @@
   // Save caller-saved registers.  js_function and code_entry are in the
   // caller-saved register list.
   DCHECK(kJSCallerSaved & js_function.bit());
-  DCHECK(kJSCallerSaved & code_entry.bit());
-  MultiPush(kJSCallerSaved | r14.bit());
+  // DCHECK(kJSCallerSaved & code_entry.bit());
+  MultiPush(kJSCallerSaved | code_entry.bit() | r14.bit());
 
   int argument_count = 3;
   PrepareCallCFunction(argument_count, code_entry);
@@ -499,7 +499,7 @@
   }
 
   // Restore caller-saved registers (including js_function and code_entry).
-  MultiPop(kJSCallerSaved | r14.bit());
+  MultiPop(kJSCallerSaved | code_entry.bit() | r14.bit());
 
   bind(&done);
 }
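
The relaxed DCHECK plus the widened save mask handle the case where
code_entry is not a JS caller-saved register on s390: its bit is folded into
the push/pop mask explicitly, which is a no-op when it already is. As a
bitmask sketch (RegList is a plain integer mask in V8; the values here are
illustrative):

  #include <cstdint>

  using RegList = uint32_t;
  constexpr RegList kJSCallerSaved = 0x000000ff;  // illustrative mask

  inline RegList SaveRestoreSet(int code_entry_code, int lr_code) {
    // OR-ing is idempotent: push and pop stay symmetric whether or not
    // code_entry was already in the caller-saved set.
    return kJSCallerSaved | (RegList{1} << code_entry_code) |
           (RegList{1} << lr_code);
  }
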
@@ -1706,6 +1706,7 @@
                               Register scratch1, Register scratch2,
                               Label* gc_required, AllocationFlags flags) {
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1767,7 +1768,7 @@
     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     AndP(result_end, result, Operand(kDoubleAlignmentMask));
     Label aligned;
-    beq(&aligned);
+    beq(&aligned, Label::kNear);
     if ((flags & PRETENURE) != 0) {
       CmpLogicalP(result, alloc_limit);
       bge(gc_required);
@@ -1792,17 +1793,20 @@
     blt(gc_required);
     AddP(result_end, result, result_end);
   }
-  StoreP(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    AddP(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    StoreP(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  AddP(result, result, Operand(kHeapObjectTag));
 }
 
 void MacroAssembler::Allocate(Register object_size, Register result,
                               Register result_end, Register scratch,
                               Label* gc_required, AllocationFlags flags) {
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1860,7 +1864,7 @@
     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     AndP(result_end, result, Operand(kDoubleAlignmentMask));
     Label aligned;
-    beq(&aligned);
+    beq(&aligned, Label::kNear);
     if ((flags & PRETENURE) != 0) {
       CmpLogicalP(result, alloc_limit);
       bge(gc_required);
@@ -1892,12 +1896,114 @@
     AndP(r0, result_end, Operand(kObjectAlignmentMask));
     Check(eq, kUnalignedAllocationInNewSpace, cr0);
   }
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    StoreP(result_end, MemOperand(top_address));
+  }
+
+  // Tag object.
+  AddP(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, Register scratch,
+                                  AllocationFlags flags) {
+  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+  // is not specified. Other registers must not overlap.
+  DCHECK(!AreAliased(object_size, result, scratch, ip));
+  DCHECK(!AreAliased(result_end, result, scratch, ip));
+  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  Register top_address = scratch;
+  mov(top_address, Operand(allocation_top));
+  LoadP(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    AndP(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned, Label::kNear);
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    StoreW(result_end, MemOperand(result));
+    AddP(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top using result. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
+    AddP(result_end, result, result_end);
+  } else {
+    AddP(result_end, result, object_size);
+  }
+
+  // Update allocation top. result_end temporarily holds the new top.
+  if (emit_debug_code()) {
+    AndP(r0, result_end, Operand(kObjectAlignmentMask));
+    Check(eq, kUnalignedAllocationInNewSpace, cr0);
+  }
   StoreP(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    AddP(result, result, Operand(kHeapObjectTag));
+  // Tag object.
+  AddP(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register scratch1, Register scratch2,
+                                  AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
   }
+  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address register.
+  Register top_address = scratch1;
+  Register result_end = scratch2;
+  mov(top_address, Operand(allocation_top));
+  LoadP(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    AndP(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned, Label::kNear);
+    mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    StoreW(result_end, MemOperand(result));
+    AddP(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top using result.
+  AddP(result_end, result, Operand(object_size));
+
+  // Update allocation top.
+  StoreP(result_end, MemOperand(top_address));
+
+  // Tag object.
+  AddP(result, result, Operand(kHeapObjectTag));
 }
 
 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
@@ -1914,7 +2020,8 @@
   AndP(scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
@@ -1934,7 +2041,8 @@
   AndP(scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -1946,7 +2054,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
                       scratch2);
@@ -1957,7 +2065,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -1969,7 +2077,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
                       scratch2);
@@ -1981,7 +2089,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -2847,6 +2955,18 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object);
+    Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+    push(object);
+    CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+    pop(object);
+    Check(eq, kOperandIsNotAGeneratorObject);
+  }
+}
+
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
@@ -2935,12 +3055,11 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* gc_required,
-                                        TaggingMode tagging_mode,
                                         MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
-           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+           NO_ALLOCATION_FLAGS);
 
   Heap::RootListIndex map_index = mode == MUTABLE
                                       ? Heap::kMutableHeapNumberMapRootIndex
@@ -2948,11 +3067,7 @@
   AssertIsRoot(heap_number_map, map_index);
 
   // Store heap number map in the allocated object.
-  if (tagging_mode == TAG_RESULT) {
     StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-  } else {
-    StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
-  }
 }
 
 void MacroAssembler::AllocateHeapNumberWithValue(
@@ -2971,7 +3086,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3539,7 +3655,7 @@
                                                      Label* no_memento_found) {
   Label map_check;
   Label top_check;
-  ExternalReference new_space_allocation_top =
+  ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -3550,11 +3666,13 @@
   JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
 
   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
-  AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
 
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
-  XorP(r0, scratch_reg, Operand(new_space_allocation_top));
+  AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+  mov(ip, Operand(new_space_allocation_top_adr));
+  LoadP(ip, MemOperand(ip));
+  XorP(r0, scratch_reg, ip);
   AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
   beq(&top_check, Label::kNear);
   // The object is on a different page than allocation top. Bail out if the
@@ -3568,7 +3686,7 @@
   // If top is on the same page as the current object, we need to check whether
   // we are below top.
   bind(&top_check);
-  CmpP(scratch_reg, Operand(new_space_allocation_top));
+  CmpP(scratch_reg, ip);
   bgt(no_memento_found);
   // Memento map check.
   bind(&map_check);
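
The memento-check fix above is subtle: the old code XOR'd and compared the
scratch register against the external reference's *address*; the new code
first loads the current allocation-top *value* through ip. The intended
same-page test, in plain C++ (the page size is assumed, not taken from this
patch):

  #include <cstdint>

  constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed

  inline bool OnSamePageAsTop(uintptr_t object_end,
                              const uintptr_t* top_address) {
    uintptr_t top = *top_address;  // mov(ip, adr); LoadP(ip, MemOperand(ip))
    return ((object_end ^ top) & ~kPageAlignmentMask) == 0;
  }
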
@@ -4961,6 +5079,14 @@
 #endif
 }
 
+void MacroAssembler::LoadB(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  lgbr(dst, src);
+#else
+  lbr(dst, src);
+#endif
+}
+
 void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
 #if V8_TARGET_ARCH_S390X
   llgc(dst, mem);
@@ -5300,7 +5426,7 @@
   ar(dst, r0);
   ShiftRight(r0, dst, Operand(8));
   ar(dst, r0);
-  lbr(dst, dst);
+  LoadB(dst, dst);
 }
 
 #ifdef V8_TARGET_ARCH_S390X
@@ -5315,7 +5441,7 @@
   AddP(dst, r0);
   ShiftRightP(r0, dst, Operand(8));
   AddP(dst, r0);
-  lbr(dst, dst);
+  LoadB(dst, dst);
 }
 #endif
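
The allocation-path changes in this file split responsibilities: Allocate()
keeps the limit check and gc_required fallback (and, for a folding dominator,
leaves the top pointer untouched), while the new FastAllocate() only bumps
top. A behavioural sketch — how the calls compose is decided by the
allocation-folding pass, not shown in this patch:

  #include <cassert>
  #include <cstdint>

  struct Space { uintptr_t top, limit; };

  // Full path: limit check, gc fallback; the result is always tagged now.
  inline uintptr_t Allocate(Space* s, size_t size, bool folding_dominator) {
    assert(s->top + size <= s->limit);  // branch to gc_required in real code
    uintptr_t result = s->top;
    if (!folding_dominator) s->top += size;  // dominators leave top alone
    return result + 1;                       // kHeapObjectTag == 1
  }

  // Bump only: legal because a dominating Allocate() already proved the
  // whole folded group fits below the limit.
  inline uintptr_t FastAllocate(Space* s, size_t size) {
    uintptr_t result = s->top;
    s->top += size;
    return result + 1;
  }
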
 
diff --git a/src/s390/macro-assembler-s390.h b/src/s390/macro-assembler-s390.h
index 77fcccb..19f0f7c 100644
--- a/src/s390/macro-assembler-s390.h
+++ b/src/s390/macro-assembler-s390.h
@@ -19,10 +19,10 @@
 const Register kReturnRegister2 = {Register::kCode_r4};
 const Register kJSFunctionRegister = {Register::kCode_r3};
 const Register kContextRegister = {Register::kCode_r13};
+const Register kAllocateSizeRegister = {Register::kCode_r3};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r6};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r7};
 const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
 const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
 const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
@@ -334,6 +334,7 @@
   void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
   void LoadlW(Register dst, Register src);
   void LoadB(Register dst, const MemOperand& opnd);
+  void LoadB(Register dst, Register src);
   void LoadlB(Register dst, const MemOperand& opnd);
 
   // Load And Test
@@ -411,6 +412,12 @@
 
   void mov(Register dst, const Operand& src);
 
+  // Zero-extend the low 32 bits of |x| on 64-bit targets; a no-op on 31-bit,
+  // where the register already holds exactly 32 bits.
+  void CleanUInt32(Register x) {
+#ifdef V8_TARGET_ARCH_S390X
+    llgfr(x, x);  // load logical 32->64, clearing the high word
+#endif
+  }
+
   // ---------------------------------------------------------------------------
   // GC Support
 
@@ -958,6 +965,15 @@
   void Allocate(Register object_size, Register result, Register result_end,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
+  // FastAllocate is currently only used for folded allocations. It just
+  // increments the top pointer without checking against the limit. This can
+  // only be done if it was proved earlier that the allocation will succeed.
+  void FastAllocate(int object_size, Register result, Register scratch1,
+                    Register scratch2, AllocationFlags flags);
+
+  void FastAllocate(Register object_size, Register result, Register result_end,
+                    Register scratch, AllocationFlags flags);
+
   void AllocateTwoByteString(Register result, Register length,
                              Register scratch1, Register scratch2,
                              Register scratch3, Label* gc_required);
@@ -982,7 +998,6 @@
   // when control continues at the gc_required label.
   void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                           Register heap_number_map, Label* gc_required,
-                          TaggingMode tagging_mode = TAG_RESULT,
                           MutableMode mode = IMMUTABLE);
   void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
                                    Register scratch1, Register scratch2,
@@ -1659,6 +1674,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
diff --git a/src/s390/simulator-s390.cc b/src/s390/simulator-s390.cc
index 06e52a7..e819556 100644
--- a/src/s390/simulator-s390.cc
+++ b/src/s390/simulator-s390.cc
@@ -10,6 +10,7 @@
 
 #include "src/assembler.h"
 #include "src/base/bits.h"
+#include "src/base/once.h"
 #include "src/codegen.h"
 #include "src/disasm.h"
 #include "src/runtime/runtime-utils.h"
@@ -274,18 +275,40 @@
               reinterpret_cast<Instruction*>(sim_->get_pc()));
         }
 
-        if (argc == 2 && last_pc != sim_->get_pc() && GetValue(arg1, &value)) {
-          for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
-            disasm::NameConverter converter;
-            disasm::Disassembler dasm(converter);
-            // use a reasonably large buffer
-            v8::internal::EmbeddedVector<char, 256> buffer;
-            dasm.InstructionDecode(buffer,
-                                   reinterpret_cast<byte*>(sim_->get_pc()));
-            PrintF("  0x%08" V8PRIxPTR "  %s\n", sim_->get_pc(),
-                   buffer.start());
-            sim_->ExecuteInstruction(
-                reinterpret_cast<Instruction*>(sim_->get_pc()));
+        if (argc == 2 && last_pc != sim_->get_pc()) {
+          disasm::NameConverter converter;
+          disasm::Disassembler dasm(converter);
+          // use a reasonably large buffer
+          v8::internal::EmbeddedVector<char, 256> buffer;
+
+          if (GetValue(arg1, &value)) {
+            // Interpret a numeric argument as the number of instructions to
+            // step past.
+            for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
+              dasm.InstructionDecode(buffer,
+                                     reinterpret_cast<byte*>(sim_->get_pc()));
+              PrintF("  0x%08" V8PRIxPTR "  %s\n", sim_->get_pc(),
+                     buffer.start());
+              sim_->ExecuteInstruction(
+                  reinterpret_cast<Instruction*>(sim_->get_pc()));
+            }
+          } else {
+            // Otherwise treat it as the mnemonic of the opcode to stop at.
+            char mnemonic[256];
+            while (!sim_->has_bad_pc()) {
+              dasm.InstructionDecode(buffer,
+                                     reinterpret_cast<byte*>(sim_->get_pc()));
+              char* mnemonicStart = buffer.start();
+              while (*mnemonicStart != 0 && *mnemonicStart != ' ')
+                mnemonicStart++;
+              SScanF(mnemonicStart, "%s", mnemonic);
+              if (!strcmp(arg1, mnemonic)) break;
+
+              PrintF("  0x%08" V8PRIxPTR "  %s\n", sim_->get_pc(),
+                     buffer.start());
+              sim_->ExecuteInstruction(
+                  reinterpret_cast<Instruction*>(sim_->get_pc()));
+            }
           }
         }
       } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
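
Stripped of the dasm/sim plumbing, the reworked stepping command above is:
a numeric argument steps that many instructions, anything else runs until the
given mnemonic comes up (the skip to the first space suggests the decoded
text carries a leading field before the mnemonic). A sketch, where PcValid,
ExecuteOne and CurrentMnemonic are illustrative stand-ins:

  #include <cstdint>
  #include <cstdlib>
  #include <cstring>

  bool ParseInt(const char* s, int64_t* out) {
    char* end;
    *out = strtoll(s, &end, 0);
    return end != s && *end == '\0';
  }

  // Stand-ins for the simulator plumbing above.
  bool PcValid();                 // !sim_->has_bad_pc()
  void ExecuteOne();              // print pc + disassembly, then execute
  const char* CurrentMnemonic();  // mnemonic token of the decoded text

  void Step(const char* arg) {
    int64_t count;
    if (ParseInt(arg, &count)) {
      for (int64_t i = 1; PcValid() && i < count; i++) ExecuteOne();
    } else {
      while (PcValid() && strcmp(CurrentMnemonic(), arg) != 0) ExecuteOne();
    }
  }
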
@@ -574,6 +597,8 @@
         } else {
           PrintF("Wrong usage. Use help command for more information.\n");
         }
+      } else if (strcmp(cmd, "icount") == 0) {
+        PrintF("%05" PRId64 "\n", sim_->icount_);
       } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
         ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
         PrintF("Trace of executed instructions is %s\n",
@@ -748,8 +773,743 @@
   isolate->set_simulator_initialized(true);
   ::v8::internal::ExternalReference::set_redirector(isolate,
                                                     &RedirectExternalReference);
+  static base::OnceType once = V8_ONCE_INIT;
+  base::CallOnce(&once, &Simulator::EvalTableInit);
 }
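
Instruction dispatch moves from one big switch to a function-pointer table,
filled in exactly once via base::CallOnce. The shape of the pattern, with
std::call_once mirroring base::CallOnce and all names illustrative:

  #include <mutex>

  struct Sim {
    int Evaluate_Unknown(const void*) { return 0; }
    int Evaluate_BKPT(const void*) { return 2; }
  };
  using EvaluateFn = int (Sim::*)(const void*);

  static EvaluateFn g_eval_table[256];
  static std::once_flag g_once;

  static void EvalTableInit() {
    for (auto& slot : g_eval_table) slot = &Sim::Evaluate_Unknown;  // default
    g_eval_table[0x00] = &Sim::Evaluate_BKPT;  // then per-opcode overrides
  }

  inline void EnsureEvalTableInit() { std::call_once(g_once, EvalTableInit); }
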
 
+Simulator::EvaluateFuncType Simulator::EvalTable[] = {NULL};
+
+void Simulator::EvalTableInit() {
+  for (int i = 0; i < MAX_NUM_OPCODES; i++) {
+    EvalTable[i] = &Simulator::Evaluate_Unknown;
+  }
+
+  EvalTable[BKPT] = &Simulator::Evaluate_BKPT;
+  EvalTable[SPM] = &Simulator::Evaluate_SPM;
+  EvalTable[BALR] = &Simulator::Evaluate_BALR;
+  EvalTable[BCTR] = &Simulator::Evaluate_BCTR;
+  EvalTable[BCR] = &Simulator::Evaluate_BCR;
+  EvalTable[SVC] = &Simulator::Evaluate_SVC;
+  EvalTable[BSM] = &Simulator::Evaluate_BSM;
+  EvalTable[BASSM] = &Simulator::Evaluate_BASSM;
+  EvalTable[BASR] = &Simulator::Evaluate_BASR;
+  EvalTable[MVCL] = &Simulator::Evaluate_MVCL;
+  EvalTable[CLCL] = &Simulator::Evaluate_CLCL;
+  EvalTable[LPR] = &Simulator::Evaluate_LPR;
+  EvalTable[LNR] = &Simulator::Evaluate_LNR;
+  EvalTable[LTR] = &Simulator::Evaluate_LTR;
+  EvalTable[LCR] = &Simulator::Evaluate_LCR;
+  EvalTable[NR] = &Simulator::Evaluate_NR;
+  EvalTable[CLR] = &Simulator::Evaluate_CLR;
+  EvalTable[OR] = &Simulator::Evaluate_OR;
+  EvalTable[XR] = &Simulator::Evaluate_XR;
+  EvalTable[LR] = &Simulator::Evaluate_LR;
+  EvalTable[CR] = &Simulator::Evaluate_CR;
+  EvalTable[AR] = &Simulator::Evaluate_AR;
+  EvalTable[SR] = &Simulator::Evaluate_SR;
+  EvalTable[MR] = &Simulator::Evaluate_MR;
+  EvalTable[DR] = &Simulator::Evaluate_DR;
+  EvalTable[ALR] = &Simulator::Evaluate_ALR;
+  EvalTable[SLR] = &Simulator::Evaluate_SLR;
+  EvalTable[LDR] = &Simulator::Evaluate_LDR;
+  EvalTable[CDR] = &Simulator::Evaluate_CDR;
+  EvalTable[LER] = &Simulator::Evaluate_LER;
+  EvalTable[STH] = &Simulator::Evaluate_STH;
+  EvalTable[LA] = &Simulator::Evaluate_LA;
+  EvalTable[STC] = &Simulator::Evaluate_STC;
+  EvalTable[IC_z] = &Simulator::Evaluate_IC_z;
+  EvalTable[EX] = &Simulator::Evaluate_EX;
+  EvalTable[BAL] = &Simulator::Evaluate_BAL;
+  EvalTable[BCT] = &Simulator::Evaluate_BCT;
+  EvalTable[BC] = &Simulator::Evaluate_BC;
+  EvalTable[LH] = &Simulator::Evaluate_LH;
+  EvalTable[CH] = &Simulator::Evaluate_CH;
+  EvalTable[AH] = &Simulator::Evaluate_AH;
+  EvalTable[SH] = &Simulator::Evaluate_SH;
+  EvalTable[MH] = &Simulator::Evaluate_MH;
+  EvalTable[BAS] = &Simulator::Evaluate_BAS;
+  EvalTable[CVD] = &Simulator::Evaluate_CVD;
+  EvalTable[CVB] = &Simulator::Evaluate_CVB;
+  EvalTable[ST] = &Simulator::Evaluate_ST;
+  EvalTable[LAE] = &Simulator::Evaluate_LAE;
+  EvalTable[N] = &Simulator::Evaluate_N;
+  EvalTable[CL] = &Simulator::Evaluate_CL;
+  EvalTable[O] = &Simulator::Evaluate_O;
+  EvalTable[X] = &Simulator::Evaluate_X;
+  EvalTable[L] = &Simulator::Evaluate_L;
+  EvalTable[C] = &Simulator::Evaluate_C;
+  EvalTable[A] = &Simulator::Evaluate_A;
+  EvalTable[S] = &Simulator::Evaluate_S;
+  EvalTable[M] = &Simulator::Evaluate_M;
+  EvalTable[D] = &Simulator::Evaluate_D;
+  EvalTable[AL] = &Simulator::Evaluate_AL;
+  EvalTable[SL] = &Simulator::Evaluate_SL;
+  EvalTable[STD] = &Simulator::Evaluate_STD;
+  EvalTable[LD] = &Simulator::Evaluate_LD;
+  EvalTable[CD] = &Simulator::Evaluate_CD;
+  EvalTable[STE] = &Simulator::Evaluate_STE;
+  EvalTable[MS] = &Simulator::Evaluate_MS;
+  EvalTable[LE] = &Simulator::Evaluate_LE;
+  EvalTable[BRXH] = &Simulator::Evaluate_BRXH;
+  EvalTable[BRXLE] = &Simulator::Evaluate_BRXLE;
+  EvalTable[BXH] = &Simulator::Evaluate_BXH;
+  EvalTable[BXLE] = &Simulator::Evaluate_BXLE;
+  EvalTable[SRL] = &Simulator::Evaluate_SRL;
+  EvalTable[SLL] = &Simulator::Evaluate_SLL;
+  EvalTable[SRA] = &Simulator::Evaluate_SRA;
+  EvalTable[SLA] = &Simulator::Evaluate_SLA;
+  EvalTable[SRDL] = &Simulator::Evaluate_SRDL;
+  EvalTable[SLDL] = &Simulator::Evaluate_SLDL;
+  EvalTable[SRDA] = &Simulator::Evaluate_SRDA;
+  EvalTable[SLDA] = &Simulator::Evaluate_SLDA;
+  EvalTable[STM] = &Simulator::Evaluate_STM;
+  EvalTable[TM] = &Simulator::Evaluate_TM;
+  EvalTable[MVI] = &Simulator::Evaluate_MVI;
+  EvalTable[TS] = &Simulator::Evaluate_TS;
+  EvalTable[NI] = &Simulator::Evaluate_NI;
+  EvalTable[CLI] = &Simulator::Evaluate_CLI;
+  EvalTable[OI] = &Simulator::Evaluate_OI;
+  EvalTable[XI] = &Simulator::Evaluate_XI;
+  EvalTable[LM] = &Simulator::Evaluate_LM;
+  EvalTable[MVCLE] = &Simulator::Evaluate_MVCLE;
+  EvalTable[CLCLE] = &Simulator::Evaluate_CLCLE;
+  EvalTable[MC] = &Simulator::Evaluate_MC;
+  EvalTable[CDS] = &Simulator::Evaluate_CDS;
+  EvalTable[STCM] = &Simulator::Evaluate_STCM;
+  EvalTable[ICM] = &Simulator::Evaluate_ICM;
+  EvalTable[BPRP] = &Simulator::Evaluate_BPRP;
+  EvalTable[BPP] = &Simulator::Evaluate_BPP;
+  EvalTable[TRTR] = &Simulator::Evaluate_TRTR;
+  EvalTable[MVN] = &Simulator::Evaluate_MVN;
+  EvalTable[MVC] = &Simulator::Evaluate_MVC;
+  EvalTable[MVZ] = &Simulator::Evaluate_MVZ;
+  EvalTable[NC] = &Simulator::Evaluate_NC;
+  EvalTable[CLC] = &Simulator::Evaluate_CLC;
+  EvalTable[OC] = &Simulator::Evaluate_OC;
+  EvalTable[XC] = &Simulator::Evaluate_XC;
+  EvalTable[MVCP] = &Simulator::Evaluate_MVCP;
+  EvalTable[TR] = &Simulator::Evaluate_TR;
+  EvalTable[TRT] = &Simulator::Evaluate_TRT;
+  EvalTable[ED] = &Simulator::Evaluate_ED;
+  EvalTable[EDMK] = &Simulator::Evaluate_EDMK;
+  EvalTable[PKU] = &Simulator::Evaluate_PKU;
+  EvalTable[UNPKU] = &Simulator::Evaluate_UNPKU;
+  EvalTable[MVCIN] = &Simulator::Evaluate_MVCIN;
+  EvalTable[PKA] = &Simulator::Evaluate_PKA;
+  EvalTable[UNPKA] = &Simulator::Evaluate_UNPKA;
+  EvalTable[PLO] = &Simulator::Evaluate_PLO;
+  EvalTable[LMD] = &Simulator::Evaluate_LMD;
+  EvalTable[SRP] = &Simulator::Evaluate_SRP;
+  EvalTable[MVO] = &Simulator::Evaluate_MVO;
+  EvalTable[PACK] = &Simulator::Evaluate_PACK;
+  EvalTable[UNPK] = &Simulator::Evaluate_UNPK;
+  EvalTable[ZAP] = &Simulator::Evaluate_ZAP;
+  EvalTable[AP] = &Simulator::Evaluate_AP;
+  EvalTable[SP] = &Simulator::Evaluate_SP;
+  EvalTable[MP] = &Simulator::Evaluate_MP;
+  EvalTable[DP] = &Simulator::Evaluate_DP;
+  EvalTable[UPT] = &Simulator::Evaluate_UPT;
+  EvalTable[PFPO] = &Simulator::Evaluate_PFPO;
+  EvalTable[IIHH] = &Simulator::Evaluate_IIHH;
+  EvalTable[IIHL] = &Simulator::Evaluate_IIHL;
+  EvalTable[IILH] = &Simulator::Evaluate_IILH;
+  EvalTable[IILL] = &Simulator::Evaluate_IILL;
+  EvalTable[NIHH] = &Simulator::Evaluate_NIHH;
+  EvalTable[NIHL] = &Simulator::Evaluate_NIHL;
+  EvalTable[NILH] = &Simulator::Evaluate_NILH;
+  EvalTable[NILL] = &Simulator::Evaluate_NILL;
+  EvalTable[OIHH] = &Simulator::Evaluate_OIHH;
+  EvalTable[OIHL] = &Simulator::Evaluate_OIHL;
+  EvalTable[OILH] = &Simulator::Evaluate_OILH;
+  EvalTable[OILL] = &Simulator::Evaluate_OILL;
+  EvalTable[LLIHH] = &Simulator::Evaluate_LLIHH;
+  EvalTable[LLIHL] = &Simulator::Evaluate_LLIHL;
+  EvalTable[LLILH] = &Simulator::Evaluate_LLILH;
+  EvalTable[LLILL] = &Simulator::Evaluate_LLILL;
+  EvalTable[TMLH] = &Simulator::Evaluate_TMLH;
+  EvalTable[TMLL] = &Simulator::Evaluate_TMLL;
+  EvalTable[TMHH] = &Simulator::Evaluate_TMHH;
+  EvalTable[TMHL] = &Simulator::Evaluate_TMHL;
+  EvalTable[BRC] = &Simulator::Evaluate_BRC;
+  EvalTable[BRAS] = &Simulator::Evaluate_BRAS;
+  EvalTable[BRCT] = &Simulator::Evaluate_BRCT;
+  EvalTable[BRCTG] = &Simulator::Evaluate_BRCTG;
+  EvalTable[LHI] = &Simulator::Evaluate_LHI;
+  EvalTable[LGHI] = &Simulator::Evaluate_LGHI;
+  EvalTable[AHI] = &Simulator::Evaluate_AHI;
+  EvalTable[AGHI] = &Simulator::Evaluate_AGHI;
+  EvalTable[MHI] = &Simulator::Evaluate_MHI;
+  EvalTable[MGHI] = &Simulator::Evaluate_MGHI;
+  EvalTable[CHI] = &Simulator::Evaluate_CHI;
+  EvalTable[CGHI] = &Simulator::Evaluate_CGHI;
+  EvalTable[LARL] = &Simulator::Evaluate_LARL;
+  EvalTable[LGFI] = &Simulator::Evaluate_LGFI;
+  EvalTable[BRCL] = &Simulator::Evaluate_BRCL;
+  EvalTable[BRASL] = &Simulator::Evaluate_BRASL;
+  EvalTable[XIHF] = &Simulator::Evaluate_XIHF;
+  EvalTable[XILF] = &Simulator::Evaluate_XILF;
+  EvalTable[IIHF] = &Simulator::Evaluate_IIHF;
+  EvalTable[IILF] = &Simulator::Evaluate_IILF;
+  EvalTable[NIHF] = &Simulator::Evaluate_NIHF;
+  EvalTable[NILF] = &Simulator::Evaluate_NILF;
+  EvalTable[OIHF] = &Simulator::Evaluate_OIHF;
+  EvalTable[OILF] = &Simulator::Evaluate_OILF;
+  EvalTable[LLIHF] = &Simulator::Evaluate_LLIHF;
+  EvalTable[LLILF] = &Simulator::Evaluate_LLILF;
+  EvalTable[MSGFI] = &Simulator::Evaluate_MSGFI;
+  EvalTable[MSFI] = &Simulator::Evaluate_MSFI;
+  EvalTable[SLGFI] = &Simulator::Evaluate_SLGFI;
+  EvalTable[SLFI] = &Simulator::Evaluate_SLFI;
+  EvalTable[AGFI] = &Simulator::Evaluate_AGFI;
+  EvalTable[AFI] = &Simulator::Evaluate_AFI;
+  EvalTable[ALGFI] = &Simulator::Evaluate_ALGFI;
+  EvalTable[ALFI] = &Simulator::Evaluate_ALFI;
+  EvalTable[CGFI] = &Simulator::Evaluate_CGFI;
+  EvalTable[CFI] = &Simulator::Evaluate_CFI;
+  EvalTable[CLGFI] = &Simulator::Evaluate_CLGFI;
+  EvalTable[CLFI] = &Simulator::Evaluate_CLFI;
+  EvalTable[LLHRL] = &Simulator::Evaluate_LLHRL;
+  EvalTable[LGHRL] = &Simulator::Evaluate_LGHRL;
+  EvalTable[LHRL] = &Simulator::Evaluate_LHRL;
+  EvalTable[LLGHRL] = &Simulator::Evaluate_LLGHRL;
+  EvalTable[STHRL] = &Simulator::Evaluate_STHRL;
+  EvalTable[LGRL] = &Simulator::Evaluate_LGRL;
+  EvalTable[STGRL] = &Simulator::Evaluate_STGRL;
+  EvalTable[LGFRL] = &Simulator::Evaluate_LGFRL;
+  EvalTable[LRL] = &Simulator::Evaluate_LRL;
+  EvalTable[LLGFRL] = &Simulator::Evaluate_LLGFRL;
+  EvalTable[STRL] = &Simulator::Evaluate_STRL;
+  EvalTable[EXRL] = &Simulator::Evaluate_EXRL;
+  EvalTable[PFDRL] = &Simulator::Evaluate_PFDRL;
+  EvalTable[CGHRL] = &Simulator::Evaluate_CGHRL;
+  EvalTable[CHRL] = &Simulator::Evaluate_CHRL;
+  EvalTable[CGRL] = &Simulator::Evaluate_CGRL;
+  EvalTable[CGFRL] = &Simulator::Evaluate_CGFRL;
+  EvalTable[ECTG] = &Simulator::Evaluate_ECTG;
+  EvalTable[CSST] = &Simulator::Evaluate_CSST;
+  EvalTable[LPD] = &Simulator::Evaluate_LPD;
+  EvalTable[LPDG] = &Simulator::Evaluate_LPDG;
+  EvalTable[BRCTH] = &Simulator::Evaluate_BRCTH;
+  EvalTable[AIH] = &Simulator::Evaluate_AIH;
+  EvalTable[ALSIH] = &Simulator::Evaluate_ALSIH;
+  EvalTable[ALSIHN] = &Simulator::Evaluate_ALSIHN;
+  EvalTable[CIH] = &Simulator::Evaluate_CIH;
+  EvalTable[STCK] = &Simulator::Evaluate_STCK;
+  EvalTable[CFC] = &Simulator::Evaluate_CFC;
+  EvalTable[IPM] = &Simulator::Evaluate_IPM;
+  EvalTable[HSCH] = &Simulator::Evaluate_HSCH;
+  EvalTable[MSCH] = &Simulator::Evaluate_MSCH;
+  EvalTable[SSCH] = &Simulator::Evaluate_SSCH;
+  EvalTable[STSCH] = &Simulator::Evaluate_STSCH;
+  EvalTable[TSCH] = &Simulator::Evaluate_TSCH;
+  EvalTable[TPI] = &Simulator::Evaluate_TPI;
+  EvalTable[SAL] = &Simulator::Evaluate_SAL;
+  EvalTable[RSCH] = &Simulator::Evaluate_RSCH;
+  EvalTable[STCRW] = &Simulator::Evaluate_STCRW;
+  EvalTable[STCPS] = &Simulator::Evaluate_STCPS;
+  EvalTable[RCHP] = &Simulator::Evaluate_RCHP;
+  EvalTable[SCHM] = &Simulator::Evaluate_SCHM;
+  EvalTable[CKSM] = &Simulator::Evaluate_CKSM;
+  EvalTable[SAR] = &Simulator::Evaluate_SAR;
+  EvalTable[EAR] = &Simulator::Evaluate_EAR;
+  EvalTable[MSR] = &Simulator::Evaluate_MSR;
+  EvalTable[MVST] = &Simulator::Evaluate_MVST;
+  EvalTable[CUSE] = &Simulator::Evaluate_CUSE;
+  EvalTable[SRST] = &Simulator::Evaluate_SRST;
+  EvalTable[XSCH] = &Simulator::Evaluate_XSCH;
+  EvalTable[STCKE] = &Simulator::Evaluate_STCKE;
+  EvalTable[STCKF] = &Simulator::Evaluate_STCKF;
+  EvalTable[SRNM] = &Simulator::Evaluate_SRNM;
+  EvalTable[STFPC] = &Simulator::Evaluate_STFPC;
+  EvalTable[LFPC] = &Simulator::Evaluate_LFPC;
+  EvalTable[TRE] = &Simulator::Evaluate_TRE;
+  EvalTable[CUUTF] = &Simulator::Evaluate_CUUTF;
+  EvalTable[CUTFU] = &Simulator::Evaluate_CUTFU;
+  EvalTable[STFLE] = &Simulator::Evaluate_STFLE;
+  EvalTable[SRNMB] = &Simulator::Evaluate_SRNMB;
+  EvalTable[SRNMT] = &Simulator::Evaluate_SRNMT;
+  EvalTable[LFAS] = &Simulator::Evaluate_LFAS;
+  EvalTable[PPA] = &Simulator::Evaluate_PPA;
+  EvalTable[ETND] = &Simulator::Evaluate_ETND;
+  EvalTable[TEND] = &Simulator::Evaluate_TEND;
+  EvalTable[NIAI] = &Simulator::Evaluate_NIAI;
+  EvalTable[TABORT] = &Simulator::Evaluate_TABORT;
+  EvalTable[TRAP4] = &Simulator::Evaluate_TRAP4;
+  EvalTable[LPEBR] = &Simulator::Evaluate_LPEBR;
+  EvalTable[LNEBR] = &Simulator::Evaluate_LNEBR;
+  EvalTable[LTEBR] = &Simulator::Evaluate_LTEBR;
+  EvalTable[LCEBR] = &Simulator::Evaluate_LCEBR;
+  EvalTable[LDEBR] = &Simulator::Evaluate_LDEBR;
+  EvalTable[LXDBR] = &Simulator::Evaluate_LXDBR;
+  EvalTable[LXEBR] = &Simulator::Evaluate_LXEBR;
+  EvalTable[MXDBR] = &Simulator::Evaluate_MXDBR;
+  EvalTable[KEBR] = &Simulator::Evaluate_KEBR;
+  EvalTable[CEBR] = &Simulator::Evaluate_CEBR;
+  EvalTable[AEBR] = &Simulator::Evaluate_AEBR;
+  EvalTable[SEBR] = &Simulator::Evaluate_SEBR;
+  EvalTable[MDEBR] = &Simulator::Evaluate_MDEBR;
+  EvalTable[DEBR] = &Simulator::Evaluate_DEBR;
+  EvalTable[MAEBR] = &Simulator::Evaluate_MAEBR;
+  EvalTable[MSEBR] = &Simulator::Evaluate_MSEBR;
+  EvalTable[LPDBR] = &Simulator::Evaluate_LPDBR;
+  EvalTable[LNDBR] = &Simulator::Evaluate_LNDBR;
+  EvalTable[LTDBR] = &Simulator::Evaluate_LTDBR;
+  EvalTable[LCDBR] = &Simulator::Evaluate_LCDBR;
+  EvalTable[SQEBR] = &Simulator::Evaluate_SQEBR;
+  EvalTable[SQDBR] = &Simulator::Evaluate_SQDBR;
+  EvalTable[SQXBR] = &Simulator::Evaluate_SQXBR;
+  EvalTable[MEEBR] = &Simulator::Evaluate_MEEBR;
+  EvalTable[KDBR] = &Simulator::Evaluate_KDBR;
+  EvalTable[CDBR] = &Simulator::Evaluate_CDBR;
+  EvalTable[ADBR] = &Simulator::Evaluate_ADBR;
+  EvalTable[SDBR] = &Simulator::Evaluate_SDBR;
+  EvalTable[MDBR] = &Simulator::Evaluate_MDBR;
+  EvalTable[DDBR] = &Simulator::Evaluate_DDBR;
+  EvalTable[MADBR] = &Simulator::Evaluate_MADBR;
+  EvalTable[MSDBR] = &Simulator::Evaluate_MSDBR;
+  EvalTable[LPXBR] = &Simulator::Evaluate_LPXBR;
+  EvalTable[LNXBR] = &Simulator::Evaluate_LNXBR;
+  EvalTable[LTXBR] = &Simulator::Evaluate_LTXBR;
+  EvalTable[LCXBR] = &Simulator::Evaluate_LCXBR;
+  EvalTable[LEDBRA] = &Simulator::Evaluate_LEDBRA;
+  EvalTable[LDXBRA] = &Simulator::Evaluate_LDXBRA;
+  EvalTable[LEXBRA] = &Simulator::Evaluate_LEXBRA;
+  EvalTable[FIXBRA] = &Simulator::Evaluate_FIXBRA;
+  EvalTable[KXBR] = &Simulator::Evaluate_KXBR;
+  EvalTable[CXBR] = &Simulator::Evaluate_CXBR;
+  EvalTable[AXBR] = &Simulator::Evaluate_AXBR;
+  EvalTable[SXBR] = &Simulator::Evaluate_SXBR;
+  EvalTable[MXBR] = &Simulator::Evaluate_MXBR;
+  EvalTable[DXBR] = &Simulator::Evaluate_DXBR;
+  EvalTable[TBEDR] = &Simulator::Evaluate_TBEDR;
+  EvalTable[TBDR] = &Simulator::Evaluate_TBDR;
+  EvalTable[DIEBR] = &Simulator::Evaluate_DIEBR;
+  EvalTable[FIEBRA] = &Simulator::Evaluate_FIEBRA;
+  EvalTable[THDER] = &Simulator::Evaluate_THDER;
+  EvalTable[THDR] = &Simulator::Evaluate_THDR;
+  EvalTable[DIDBR] = &Simulator::Evaluate_DIDBR;
+  EvalTable[FIDBRA] = &Simulator::Evaluate_FIDBRA;
+  EvalTable[LXR] = &Simulator::Evaluate_LXR;
+  EvalTable[LPDFR] = &Simulator::Evaluate_LPDFR;
+  EvalTable[LNDFR] = &Simulator::Evaluate_LNDFR;
+  EvalTable[LCDFR] = &Simulator::Evaluate_LCDFR;
+  EvalTable[LZER] = &Simulator::Evaluate_LZER;
+  EvalTable[LZDR] = &Simulator::Evaluate_LZDR;
+  EvalTable[LZXR] = &Simulator::Evaluate_LZXR;
+  EvalTable[SFPC] = &Simulator::Evaluate_SFPC;
+  EvalTable[SFASR] = &Simulator::Evaluate_SFASR;
+  EvalTable[EFPC] = &Simulator::Evaluate_EFPC;
+  EvalTable[CELFBR] = &Simulator::Evaluate_CELFBR;
+  EvalTable[CDLFBR] = &Simulator::Evaluate_CDLFBR;
+  EvalTable[CXLFBR] = &Simulator::Evaluate_CXLFBR;
+  EvalTable[CEFBRA] = &Simulator::Evaluate_CEFBRA;
+  EvalTable[CDFBRA] = &Simulator::Evaluate_CDFBRA;
+  EvalTable[CXFBRA] = &Simulator::Evaluate_CXFBRA;
+  EvalTable[CFEBRA] = &Simulator::Evaluate_CFEBRA;
+  EvalTable[CFDBRA] = &Simulator::Evaluate_CFDBRA;
+  EvalTable[CFXBRA] = &Simulator::Evaluate_CFXBRA;
+  EvalTable[CLFEBR] = &Simulator::Evaluate_CLFEBR;
+  EvalTable[CLFDBR] = &Simulator::Evaluate_CLFDBR;
+  EvalTable[CLFXBR] = &Simulator::Evaluate_CLFXBR;
+  EvalTable[CELGBR] = &Simulator::Evaluate_CELGBR;
+  EvalTable[CDLGBR] = &Simulator::Evaluate_CDLGBR;
+  EvalTable[CXLGBR] = &Simulator::Evaluate_CXLGBR;
+  EvalTable[CEGBRA] = &Simulator::Evaluate_CEGBRA;
+  EvalTable[CDGBRA] = &Simulator::Evaluate_CDGBRA;
+  EvalTable[CXGBRA] = &Simulator::Evaluate_CXGBRA;
+  EvalTable[CGEBRA] = &Simulator::Evaluate_CGEBRA;
+  EvalTable[CGDBRA] = &Simulator::Evaluate_CGDBRA;
+  EvalTable[CGXBRA] = &Simulator::Evaluate_CGXBRA;
+  EvalTable[CLGEBR] = &Simulator::Evaluate_CLGEBR;
+  EvalTable[CLGDBR] = &Simulator::Evaluate_CLGDBR;
+  EvalTable[CFER] = &Simulator::Evaluate_CFER;
+  EvalTable[CFDR] = &Simulator::Evaluate_CFDR;
+  EvalTable[CFXR] = &Simulator::Evaluate_CFXR;
+  EvalTable[LDGR] = &Simulator::Evaluate_LDGR;
+  EvalTable[CGER] = &Simulator::Evaluate_CGER;
+  EvalTable[CGDR] = &Simulator::Evaluate_CGDR;
+  EvalTable[CGXR] = &Simulator::Evaluate_CGXR;
+  EvalTable[LGDR] = &Simulator::Evaluate_LGDR;
+  EvalTable[MDTR] = &Simulator::Evaluate_MDTR;
+  EvalTable[MDTRA] = &Simulator::Evaluate_MDTRA;
+  EvalTable[DDTRA] = &Simulator::Evaluate_DDTRA;
+  EvalTable[ADTRA] = &Simulator::Evaluate_ADTRA;
+  EvalTable[SDTRA] = &Simulator::Evaluate_SDTRA;
+  EvalTable[LDETR] = &Simulator::Evaluate_LDETR;
+  EvalTable[LEDTR] = &Simulator::Evaluate_LEDTR;
+  EvalTable[LTDTR] = &Simulator::Evaluate_LTDTR;
+  EvalTable[FIDTR] = &Simulator::Evaluate_FIDTR;
+  EvalTable[MXTRA] = &Simulator::Evaluate_MXTRA;
+  EvalTable[DXTRA] = &Simulator::Evaluate_DXTRA;
+  EvalTable[AXTRA] = &Simulator::Evaluate_AXTRA;
+  EvalTable[SXTRA] = &Simulator::Evaluate_SXTRA;
+  EvalTable[LXDTR] = &Simulator::Evaluate_LXDTR;
+  EvalTable[LDXTR] = &Simulator::Evaluate_LDXTR;
+  EvalTable[LTXTR] = &Simulator::Evaluate_LTXTR;
+  EvalTable[FIXTR] = &Simulator::Evaluate_FIXTR;
+  EvalTable[KDTR] = &Simulator::Evaluate_KDTR;
+  EvalTable[CGDTRA] = &Simulator::Evaluate_CGDTRA;
+  EvalTable[CUDTR] = &Simulator::Evaluate_CUDTR;
+  EvalTable[CDTR] = &Simulator::Evaluate_CDTR;
+  EvalTable[EEDTR] = &Simulator::Evaluate_EEDTR;
+  EvalTable[ESDTR] = &Simulator::Evaluate_ESDTR;
+  EvalTable[KXTR] = &Simulator::Evaluate_KXTR;
+  EvalTable[CGXTRA] = &Simulator::Evaluate_CGXTRA;
+  EvalTable[CUXTR] = &Simulator::Evaluate_CUXTR;
+  EvalTable[CSXTR] = &Simulator::Evaluate_CSXTR;
+  EvalTable[CXTR] = &Simulator::Evaluate_CXTR;
+  EvalTable[EEXTR] = &Simulator::Evaluate_EEXTR;
+  EvalTable[ESXTR] = &Simulator::Evaluate_ESXTR;
+  EvalTable[CDGTRA] = &Simulator::Evaluate_CDGTRA;
+  EvalTable[CDUTR] = &Simulator::Evaluate_CDUTR;
+  EvalTable[CDSTR] = &Simulator::Evaluate_CDSTR;
+  EvalTable[CEDTR] = &Simulator::Evaluate_CEDTR;
+  EvalTable[QADTR] = &Simulator::Evaluate_QADTR;
+  EvalTable[IEDTR] = &Simulator::Evaluate_IEDTR;
+  EvalTable[RRDTR] = &Simulator::Evaluate_RRDTR;
+  EvalTable[CXGTRA] = &Simulator::Evaluate_CXGTRA;
+  EvalTable[CXUTR] = &Simulator::Evaluate_CXUTR;
+  EvalTable[CXSTR] = &Simulator::Evaluate_CXSTR;
+  EvalTable[CEXTR] = &Simulator::Evaluate_CEXTR;
+  EvalTable[QAXTR] = &Simulator::Evaluate_QAXTR;
+  EvalTable[IEXTR] = &Simulator::Evaluate_IEXTR;
+  EvalTable[RRXTR] = &Simulator::Evaluate_RRXTR;
+  EvalTable[LPGR] = &Simulator::Evaluate_LPGR;
+  EvalTable[LNGR] = &Simulator::Evaluate_LNGR;
+  EvalTable[LTGR] = &Simulator::Evaluate_LTGR;
+  EvalTable[LCGR] = &Simulator::Evaluate_LCGR;
+  EvalTable[LGR] = &Simulator::Evaluate_LGR;
+  EvalTable[LGBR] = &Simulator::Evaluate_LGBR;
+  EvalTable[LGHR] = &Simulator::Evaluate_LGHR;
+  EvalTable[AGR] = &Simulator::Evaluate_AGR;
+  EvalTable[SGR] = &Simulator::Evaluate_SGR;
+  EvalTable[ALGR] = &Simulator::Evaluate_ALGR;
+  EvalTable[SLGR] = &Simulator::Evaluate_SLGR;
+  EvalTable[MSGR] = &Simulator::Evaluate_MSGR;
+  EvalTable[DSGR] = &Simulator::Evaluate_DSGR;
+  EvalTable[LRVGR] = &Simulator::Evaluate_LRVGR;
+  EvalTable[LPGFR] = &Simulator::Evaluate_LPGFR;
+  EvalTable[LNGFR] = &Simulator::Evaluate_LNGFR;
+  EvalTable[LTGFR] = &Simulator::Evaluate_LTGFR;
+  EvalTable[LCGFR] = &Simulator::Evaluate_LCGFR;
+  EvalTable[LGFR] = &Simulator::Evaluate_LGFR;
+  EvalTable[LLGFR] = &Simulator::Evaluate_LLGFR;
+  EvalTable[LLGTR] = &Simulator::Evaluate_LLGTR;
+  EvalTable[AGFR] = &Simulator::Evaluate_AGFR;
+  EvalTable[SGFR] = &Simulator::Evaluate_SGFR;
+  EvalTable[ALGFR] = &Simulator::Evaluate_ALGFR;
+  EvalTable[SLGFR] = &Simulator::Evaluate_SLGFR;
+  EvalTable[MSGFR] = &Simulator::Evaluate_MSGFR;
+  EvalTable[DSGFR] = &Simulator::Evaluate_DSGFR;
+  EvalTable[KMAC] = &Simulator::Evaluate_KMAC;
+  EvalTable[LRVR] = &Simulator::Evaluate_LRVR;
+  EvalTable[CGR] = &Simulator::Evaluate_CGR;
+  EvalTable[CLGR] = &Simulator::Evaluate_CLGR;
+  EvalTable[LBR] = &Simulator::Evaluate_LBR;
+  EvalTable[LHR] = &Simulator::Evaluate_LHR;
+  EvalTable[KMF] = &Simulator::Evaluate_KMF;
+  EvalTable[KMO] = &Simulator::Evaluate_KMO;
+  EvalTable[PCC] = &Simulator::Evaluate_PCC;
+  EvalTable[KMCTR] = &Simulator::Evaluate_KMCTR;
+  EvalTable[KM] = &Simulator::Evaluate_KM;
+  EvalTable[KMC] = &Simulator::Evaluate_KMC;
+  EvalTable[CGFR] = &Simulator::Evaluate_CGFR;
+  EvalTable[KIMD] = &Simulator::Evaluate_KIMD;
+  EvalTable[KLMD] = &Simulator::Evaluate_KLMD;
+  EvalTable[CFDTR] = &Simulator::Evaluate_CFDTR;
+  EvalTable[CLGDTR] = &Simulator::Evaluate_CLGDTR;
+  EvalTable[CLFDTR] = &Simulator::Evaluate_CLFDTR;
+  EvalTable[BCTGR] = &Simulator::Evaluate_BCTGR;
+  EvalTable[CFXTR] = &Simulator::Evaluate_CFXTR;
+  EvalTable[CLFXTR] = &Simulator::Evaluate_CLFXTR;
+  EvalTable[CDFTR] = &Simulator::Evaluate_CDFTR;
+  EvalTable[CDLGTR] = &Simulator::Evaluate_CDLGTR;
+  EvalTable[CDLFTR] = &Simulator::Evaluate_CDLFTR;
+  EvalTable[CXFTR] = &Simulator::Evaluate_CXFTR;
+  EvalTable[CXLGTR] = &Simulator::Evaluate_CXLGTR;
+  EvalTable[CXLFTR] = &Simulator::Evaluate_CXLFTR;
+  EvalTable[CGRT] = &Simulator::Evaluate_CGRT;
+  EvalTable[NGR] = &Simulator::Evaluate_NGR;
+  EvalTable[OGR] = &Simulator::Evaluate_OGR;
+  EvalTable[XGR] = &Simulator::Evaluate_XGR;
+  EvalTable[FLOGR] = &Simulator::Evaluate_FLOGR;
+  EvalTable[LLGCR] = &Simulator::Evaluate_LLGCR;
+  EvalTable[LLGHR] = &Simulator::Evaluate_LLGHR;
+  EvalTable[MLGR] = &Simulator::Evaluate_MLGR;
+  EvalTable[DLGR] = &Simulator::Evaluate_DLGR;
+  EvalTable[ALCGR] = &Simulator::Evaluate_ALCGR;
+  EvalTable[SLBGR] = &Simulator::Evaluate_SLBGR;
+  EvalTable[EPSW] = &Simulator::Evaluate_EPSW;
+  EvalTable[TRTT] = &Simulator::Evaluate_TRTT;
+  EvalTable[TRTO] = &Simulator::Evaluate_TRTO;
+  EvalTable[TROT] = &Simulator::Evaluate_TROT;
+  EvalTable[TROO] = &Simulator::Evaluate_TROO;
+  EvalTable[LLCR] = &Simulator::Evaluate_LLCR;
+  EvalTable[LLHR] = &Simulator::Evaluate_LLHR;
+  EvalTable[MLR] = &Simulator::Evaluate_MLR;
+  EvalTable[DLR] = &Simulator::Evaluate_DLR;
+  EvalTable[ALCR] = &Simulator::Evaluate_ALCR;
+  EvalTable[SLBR] = &Simulator::Evaluate_SLBR;
+  EvalTable[CU14] = &Simulator::Evaluate_CU14;
+  EvalTable[CU24] = &Simulator::Evaluate_CU24;
+  EvalTable[CU41] = &Simulator::Evaluate_CU41;
+  EvalTable[CU42] = &Simulator::Evaluate_CU42;
+  EvalTable[TRTRE] = &Simulator::Evaluate_TRTRE;
+  EvalTable[SRSTU] = &Simulator::Evaluate_SRSTU;
+  EvalTable[TRTE] = &Simulator::Evaluate_TRTE;
+  EvalTable[AHHHR] = &Simulator::Evaluate_AHHHR;
+  EvalTable[SHHHR] = &Simulator::Evaluate_SHHHR;
+  EvalTable[ALHHHR] = &Simulator::Evaluate_ALHHHR;
+  EvalTable[SLHHHR] = &Simulator::Evaluate_SLHHHR;
+  EvalTable[CHHR] = &Simulator::Evaluate_CHHR;
+  EvalTable[AHHLR] = &Simulator::Evaluate_AHHLR;
+  EvalTable[SHHLR] = &Simulator::Evaluate_SHHLR;
+  EvalTable[ALHHLR] = &Simulator::Evaluate_ALHHLR;
+  EvalTable[SLHHLR] = &Simulator::Evaluate_SLHHLR;
+  EvalTable[CHLR] = &Simulator::Evaluate_CHLR;
+  EvalTable[POPCNT_Z] = &Simulator::Evaluate_POPCNT_Z;
+  EvalTable[LOCGR] = &Simulator::Evaluate_LOCGR;
+  EvalTable[NGRK] = &Simulator::Evaluate_NGRK;
+  EvalTable[OGRK] = &Simulator::Evaluate_OGRK;
+  EvalTable[XGRK] = &Simulator::Evaluate_XGRK;
+  EvalTable[AGRK] = &Simulator::Evaluate_AGRK;
+  EvalTable[SGRK] = &Simulator::Evaluate_SGRK;
+  EvalTable[ALGRK] = &Simulator::Evaluate_ALGRK;
+  EvalTable[SLGRK] = &Simulator::Evaluate_SLGRK;
+  EvalTable[LOCR] = &Simulator::Evaluate_LOCR;
+  EvalTable[NRK] = &Simulator::Evaluate_NRK;
+  EvalTable[ORK] = &Simulator::Evaluate_ORK;
+  EvalTable[XRK] = &Simulator::Evaluate_XRK;
+  EvalTable[ARK] = &Simulator::Evaluate_ARK;
+  EvalTable[SRK] = &Simulator::Evaluate_SRK;
+  EvalTable[ALRK] = &Simulator::Evaluate_ALRK;
+  EvalTable[SLRK] = &Simulator::Evaluate_SLRK;
+  EvalTable[LTG] = &Simulator::Evaluate_LTG;
+  EvalTable[LG] = &Simulator::Evaluate_LG;
+  EvalTable[CVBY] = &Simulator::Evaluate_CVBY;
+  EvalTable[AG] = &Simulator::Evaluate_AG;
+  EvalTable[SG] = &Simulator::Evaluate_SG;
+  EvalTable[ALG] = &Simulator::Evaluate_ALG;
+  EvalTable[SLG] = &Simulator::Evaluate_SLG;
+  EvalTable[MSG] = &Simulator::Evaluate_MSG;
+  EvalTable[DSG] = &Simulator::Evaluate_DSG;
+  EvalTable[CVBG] = &Simulator::Evaluate_CVBG;
+  EvalTable[LRVG] = &Simulator::Evaluate_LRVG;
+  EvalTable[LT] = &Simulator::Evaluate_LT;
+  EvalTable[LGF] = &Simulator::Evaluate_LGF;
+  EvalTable[LGH] = &Simulator::Evaluate_LGH;
+  EvalTable[LLGF] = &Simulator::Evaluate_LLGF;
+  EvalTable[LLGT] = &Simulator::Evaluate_LLGT;
+  EvalTable[AGF] = &Simulator::Evaluate_AGF;
+  EvalTable[SGF] = &Simulator::Evaluate_SGF;
+  EvalTable[ALGF] = &Simulator::Evaluate_ALGF;
+  EvalTable[SLGF] = &Simulator::Evaluate_SLGF;
+  EvalTable[MSGF] = &Simulator::Evaluate_MSGF;
+  EvalTable[DSGF] = &Simulator::Evaluate_DSGF;
+  EvalTable[LRV] = &Simulator::Evaluate_LRV;
+  EvalTable[LRVH] = &Simulator::Evaluate_LRVH;
+  EvalTable[CG] = &Simulator::Evaluate_CG;
+  EvalTable[CLG] = &Simulator::Evaluate_CLG;
+  EvalTable[STG] = &Simulator::Evaluate_STG;
+  EvalTable[NTSTG] = &Simulator::Evaluate_NTSTG;
+  EvalTable[CVDY] = &Simulator::Evaluate_CVDY;
+  EvalTable[CVDG] = &Simulator::Evaluate_CVDG;
+  EvalTable[STRVG] = &Simulator::Evaluate_STRVG;
+  EvalTable[CGF] = &Simulator::Evaluate_CGF;
+  EvalTable[CLGF] = &Simulator::Evaluate_CLGF;
+  EvalTable[LTGF] = &Simulator::Evaluate_LTGF;
+  EvalTable[CGH] = &Simulator::Evaluate_CGH;
+  EvalTable[PFD] = &Simulator::Evaluate_PFD;
+  EvalTable[STRV] = &Simulator::Evaluate_STRV;
+  EvalTable[STRVH] = &Simulator::Evaluate_STRVH;
+  EvalTable[BCTG] = &Simulator::Evaluate_BCTG;
+  EvalTable[STY] = &Simulator::Evaluate_STY;
+  EvalTable[MSY] = &Simulator::Evaluate_MSY;
+  EvalTable[NY] = &Simulator::Evaluate_NY;
+  EvalTable[CLY] = &Simulator::Evaluate_CLY;
+  EvalTable[OY] = &Simulator::Evaluate_OY;
+  EvalTable[XY] = &Simulator::Evaluate_XY;
+  EvalTable[LY] = &Simulator::Evaluate_LY;
+  EvalTable[CY] = &Simulator::Evaluate_CY;
+  EvalTable[AY] = &Simulator::Evaluate_AY;
+  EvalTable[SY] = &Simulator::Evaluate_SY;
+  EvalTable[MFY] = &Simulator::Evaluate_MFY;
+  EvalTable[ALY] = &Simulator::Evaluate_ALY;
+  EvalTable[SLY] = &Simulator::Evaluate_SLY;
+  EvalTable[STHY] = &Simulator::Evaluate_STHY;
+  EvalTable[LAY] = &Simulator::Evaluate_LAY;
+  EvalTable[STCY] = &Simulator::Evaluate_STCY;
+  EvalTable[ICY] = &Simulator::Evaluate_ICY;
+  EvalTable[LAEY] = &Simulator::Evaluate_LAEY;
+  EvalTable[LB] = &Simulator::Evaluate_LB;
+  EvalTable[LGB] = &Simulator::Evaluate_LGB;
+  EvalTable[LHY] = &Simulator::Evaluate_LHY;
+  EvalTable[CHY] = &Simulator::Evaluate_CHY;
+  EvalTable[AHY] = &Simulator::Evaluate_AHY;
+  EvalTable[SHY] = &Simulator::Evaluate_SHY;
+  EvalTable[MHY] = &Simulator::Evaluate_MHY;
+  EvalTable[NG] = &Simulator::Evaluate_NG;
+  EvalTable[OG] = &Simulator::Evaluate_OG;
+  EvalTable[XG] = &Simulator::Evaluate_XG;
+  EvalTable[LGAT] = &Simulator::Evaluate_LGAT;
+  EvalTable[MLG] = &Simulator::Evaluate_MLG;
+  EvalTable[DLG] = &Simulator::Evaluate_DLG;
+  EvalTable[ALCG] = &Simulator::Evaluate_ALCG;
+  EvalTable[SLBG] = &Simulator::Evaluate_SLBG;
+  EvalTable[STPQ] = &Simulator::Evaluate_STPQ;
+  EvalTable[LPQ] = &Simulator::Evaluate_LPQ;
+  EvalTable[LLGC] = &Simulator::Evaluate_LLGC;
+  EvalTable[LLGH] = &Simulator::Evaluate_LLGH;
+  EvalTable[LLC] = &Simulator::Evaluate_LLC;
+  EvalTable[LLH] = &Simulator::Evaluate_LLH;
+  EvalTable[ML] = &Simulator::Evaluate_ML;
+  EvalTable[DL] = &Simulator::Evaluate_DL;
+  EvalTable[ALC] = &Simulator::Evaluate_ALC;
+  EvalTable[SLB] = &Simulator::Evaluate_SLB;
+  EvalTable[LLGTAT] = &Simulator::Evaluate_LLGTAT;
+  EvalTable[LLGFAT] = &Simulator::Evaluate_LLGFAT;
+  EvalTable[LAT] = &Simulator::Evaluate_LAT;
+  EvalTable[LBH] = &Simulator::Evaluate_LBH;
+  EvalTable[LLCH] = &Simulator::Evaluate_LLCH;
+  EvalTable[STCH] = &Simulator::Evaluate_STCH;
+  EvalTable[LHH] = &Simulator::Evaluate_LHH;
+  EvalTable[LLHH] = &Simulator::Evaluate_LLHH;
+  EvalTable[STHH] = &Simulator::Evaluate_STHH;
+  EvalTable[LFHAT] = &Simulator::Evaluate_LFHAT;
+  EvalTable[LFH] = &Simulator::Evaluate_LFH;
+  EvalTable[STFH] = &Simulator::Evaluate_STFH;
+  EvalTable[CHF] = &Simulator::Evaluate_CHF;
+  EvalTable[MVCDK] = &Simulator::Evaluate_MVCDK;
+  EvalTable[MVHHI] = &Simulator::Evaluate_MVHHI;
+  EvalTable[MVGHI] = &Simulator::Evaluate_MVGHI;
+  EvalTable[MVHI] = &Simulator::Evaluate_MVHI;
+  EvalTable[CHHSI] = &Simulator::Evaluate_CHHSI;
+  EvalTable[CGHSI] = &Simulator::Evaluate_CGHSI;
+  EvalTable[CHSI] = &Simulator::Evaluate_CHSI;
+  EvalTable[CLFHSI] = &Simulator::Evaluate_CLFHSI;
+  EvalTable[TBEGIN] = &Simulator::Evaluate_TBEGIN;
+  EvalTable[TBEGINC] = &Simulator::Evaluate_TBEGINC;
+  EvalTable[LMG] = &Simulator::Evaluate_LMG;
+  EvalTable[SRAG] = &Simulator::Evaluate_SRAG;
+  EvalTable[SLAG] = &Simulator::Evaluate_SLAG;
+  EvalTable[SRLG] = &Simulator::Evaluate_SRLG;
+  EvalTable[SLLG] = &Simulator::Evaluate_SLLG;
+  EvalTable[CSY] = &Simulator::Evaluate_CSY;
+  EvalTable[RLLG] = &Simulator::Evaluate_RLLG;
+  EvalTable[RLL] = &Simulator::Evaluate_RLL;
+  EvalTable[STMG] = &Simulator::Evaluate_STMG;
+  EvalTable[STMH] = &Simulator::Evaluate_STMH;
+  EvalTable[STCMH] = &Simulator::Evaluate_STCMH;
+  EvalTable[STCMY] = &Simulator::Evaluate_STCMY;
+  EvalTable[CDSY] = &Simulator::Evaluate_CDSY;
+  EvalTable[CDSG] = &Simulator::Evaluate_CDSG;
+  EvalTable[BXHG] = &Simulator::Evaluate_BXHG;
+  EvalTable[BXLEG] = &Simulator::Evaluate_BXLEG;
+  EvalTable[ECAG] = &Simulator::Evaluate_ECAG;
+  EvalTable[TMY] = &Simulator::Evaluate_TMY;
+  EvalTable[MVIY] = &Simulator::Evaluate_MVIY;
+  EvalTable[NIY] = &Simulator::Evaluate_NIY;
+  EvalTable[CLIY] = &Simulator::Evaluate_CLIY;
+  EvalTable[OIY] = &Simulator::Evaluate_OIY;
+  EvalTable[XIY] = &Simulator::Evaluate_XIY;
+  EvalTable[ASI] = &Simulator::Evaluate_ASI;
+  EvalTable[ALSI] = &Simulator::Evaluate_ALSI;
+  EvalTable[AGSI] = &Simulator::Evaluate_AGSI;
+  EvalTable[ALGSI] = &Simulator::Evaluate_ALGSI;
+  EvalTable[ICMH] = &Simulator::Evaluate_ICMH;
+  EvalTable[ICMY] = &Simulator::Evaluate_ICMY;
+  EvalTable[MVCLU] = &Simulator::Evaluate_MVCLU;
+  EvalTable[CLCLU] = &Simulator::Evaluate_CLCLU;
+  EvalTable[STMY] = &Simulator::Evaluate_STMY;
+  EvalTable[LMH] = &Simulator::Evaluate_LMH;
+  EvalTable[LMY] = &Simulator::Evaluate_LMY;
+  EvalTable[TP] = &Simulator::Evaluate_TP;
+  EvalTable[SRAK] = &Simulator::Evaluate_SRAK;
+  EvalTable[SLAK] = &Simulator::Evaluate_SLAK;
+  EvalTable[SRLK] = &Simulator::Evaluate_SRLK;
+  EvalTable[SLLK] = &Simulator::Evaluate_SLLK;
+  EvalTable[LOCG] = &Simulator::Evaluate_LOCG;
+  EvalTable[STOCG] = &Simulator::Evaluate_STOCG;
+  EvalTable[LANG] = &Simulator::Evaluate_LANG;
+  EvalTable[LAOG] = &Simulator::Evaluate_LAOG;
+  EvalTable[LAXG] = &Simulator::Evaluate_LAXG;
+  EvalTable[LAAG] = &Simulator::Evaluate_LAAG;
+  EvalTable[LAALG] = &Simulator::Evaluate_LAALG;
+  EvalTable[LOC] = &Simulator::Evaluate_LOC;
+  EvalTable[STOC] = &Simulator::Evaluate_STOC;
+  EvalTable[LAN] = &Simulator::Evaluate_LAN;
+  EvalTable[LAO] = &Simulator::Evaluate_LAO;
+  EvalTable[LAX] = &Simulator::Evaluate_LAX;
+  EvalTable[LAA] = &Simulator::Evaluate_LAA;
+  EvalTable[LAAL] = &Simulator::Evaluate_LAAL;
+  EvalTable[BRXHG] = &Simulator::Evaluate_BRXHG;
+  EvalTable[BRXLG] = &Simulator::Evaluate_BRXLG;
+  EvalTable[RISBLG] = &Simulator::Evaluate_RISBLG;
+  EvalTable[RNSBG] = &Simulator::Evaluate_RNSBG;
+  EvalTable[RISBG] = &Simulator::Evaluate_RISBG;
+  EvalTable[ROSBG] = &Simulator::Evaluate_ROSBG;
+  EvalTable[RXSBG] = &Simulator::Evaluate_RXSBG;
+  EvalTable[RISBGN] = &Simulator::Evaluate_RISBGN;
+  EvalTable[RISBHG] = &Simulator::Evaluate_RISBHG;
+  EvalTable[CGRJ] = &Simulator::Evaluate_CGRJ;
+  EvalTable[CGIT] = &Simulator::Evaluate_CGIT;
+  EvalTable[CIT] = &Simulator::Evaluate_CIT;
+  EvalTable[CLFIT] = &Simulator::Evaluate_CLFIT;
+  EvalTable[CGIJ] = &Simulator::Evaluate_CGIJ;
+  EvalTable[CIJ] = &Simulator::Evaluate_CIJ;
+  EvalTable[AHIK] = &Simulator::Evaluate_AHIK;
+  EvalTable[AGHIK] = &Simulator::Evaluate_AGHIK;
+  EvalTable[ALHSIK] = &Simulator::Evaluate_ALHSIK;
+  EvalTable[ALGHSIK] = &Simulator::Evaluate_ALGHSIK;
+  EvalTable[CGRB] = &Simulator::Evaluate_CGRB;
+  EvalTable[CGIB] = &Simulator::Evaluate_CGIB;
+  EvalTable[CIB] = &Simulator::Evaluate_CIB;
+  EvalTable[LDEB] = &Simulator::Evaluate_LDEB;
+  EvalTable[LXDB] = &Simulator::Evaluate_LXDB;
+  EvalTable[LXEB] = &Simulator::Evaluate_LXEB;
+  EvalTable[MXDB] = &Simulator::Evaluate_MXDB;
+  EvalTable[KEB] = &Simulator::Evaluate_KEB;
+  EvalTable[CEB] = &Simulator::Evaluate_CEB;
+  EvalTable[AEB] = &Simulator::Evaluate_AEB;
+  EvalTable[SEB] = &Simulator::Evaluate_SEB;
+  EvalTable[MDEB] = &Simulator::Evaluate_MDEB;
+  EvalTable[DEB] = &Simulator::Evaluate_DEB;
+  EvalTable[MAEB] = &Simulator::Evaluate_MAEB;
+  EvalTable[MSEB] = &Simulator::Evaluate_MSEB;
+  EvalTable[TCEB] = &Simulator::Evaluate_TCEB;
+  EvalTable[TCDB] = &Simulator::Evaluate_TCDB;
+  EvalTable[TCXB] = &Simulator::Evaluate_TCXB;
+  EvalTable[SQEB] = &Simulator::Evaluate_SQEB;
+  EvalTable[SQDB] = &Simulator::Evaluate_SQDB;
+  EvalTable[MEEB] = &Simulator::Evaluate_MEEB;
+  EvalTable[KDB] = &Simulator::Evaluate_KDB;
+  EvalTable[CDB] = &Simulator::Evaluate_CDB;
+  EvalTable[ADB] = &Simulator::Evaluate_ADB;
+  EvalTable[SDB] = &Simulator::Evaluate_SDB;
+  EvalTable[MDB] = &Simulator::Evaluate_MDB;
+  EvalTable[DDB] = &Simulator::Evaluate_DDB;
+  EvalTable[MADB] = &Simulator::Evaluate_MADB;
+  EvalTable[MSDB] = &Simulator::Evaluate_MSDB;
+  EvalTable[SLDT] = &Simulator::Evaluate_SLDT;
+  EvalTable[SRDT] = &Simulator::Evaluate_SRDT;
+  EvalTable[SLXT] = &Simulator::Evaluate_SLXT;
+  EvalTable[SRXT] = &Simulator::Evaluate_SRXT;
+  EvalTable[TDCET] = &Simulator::Evaluate_TDCET;
+  EvalTable[TDGET] = &Simulator::Evaluate_TDGET;
+  EvalTable[TDCDT] = &Simulator::Evaluate_TDCDT;
+  EvalTable[TDGDT] = &Simulator::Evaluate_TDGDT;
+  EvalTable[TDCXT] = &Simulator::Evaluate_TDCXT;
+  EvalTable[TDGXT] = &Simulator::Evaluate_TDGXT;
+  EvalTable[LEY] = &Simulator::Evaluate_LEY;
+  EvalTable[LDY] = &Simulator::Evaluate_LDY;
+  EvalTable[STEY] = &Simulator::Evaluate_STEY;
+  EvalTable[STDY] = &Simulator::Evaluate_STDY;
+  EvalTable[CZDT] = &Simulator::Evaluate_CZDT;
+  EvalTable[CZXT] = &Simulator::Evaluate_CZXT;
+  EvalTable[CDZT] = &Simulator::Evaluate_CDZT;
+  EvalTable[CXZT] = &Simulator::Evaluate_CXZT;
+}  // NOLINT
+
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
@@ -2587,6 +3347,7 @@
         SetS390OverflowCode(isOF);
         set_register(r1, r2_val - r3_val);
       }
+      break;
     }
     case AGHI:
     case MGHI: {
@@ -2987,17 +3748,19 @@
       RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
       int r1 = rrinst->R1Value();
       int r2 = rrinst->R2Value();
-#ifdef V8_TARGET_ARCH_S390X
+      if (op == LGBR) {
       int64_t r2_val = get_low_register<int64_t>(r2);
       r2_val <<= 56;
       r2_val >>= 56;
       set_register(r1, r2_val);
-#else
+      } else if (op == LBR) {
       int32_t r2_val = get_low_register<int32_t>(r2);
       r2_val <<= 24;
       r2_val >>= 24;
       set_low_register(r1, r2_val);
-#endif
+      } else {
+        UNREACHABLE();
+      }
       break;
     }
     case LGHR:
@@ -3005,17 +3768,19 @@
       RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
       int r1 = rrinst->R1Value();
       int r2 = rrinst->R2Value();
-#ifdef V8_TARGET_ARCH_S390X
+      if (op == LGHR) {
       int64_t r2_val = get_low_register<int64_t>(r2);
       r2_val <<= 48;
       r2_val >>= 48;
       set_register(r1, r2_val);
-#else
+      } else if (op == LHR) {
       int32_t r2_val = get_low_register<int32_t>(r2);
       r2_val <<= 16;
       r2_val >>= 16;
       set_low_register(r1, r2_val);
-#endif
+      } else {
+        UNREACHABLE();
+      }
       break;
     }
     case ALCR: {
@@ -4802,47 +5567,56 @@
   return result;
 }
 
-// Executes the current instruction.
-void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
-  if (v8::internal::FLAG_check_icache) {
-    CheckICache(isolate_->simulator_i_cache(), instr);
-  }
-  pc_modified_ = false;
-  if (::v8::internal::FLAG_trace_sim) {
-    disasm::NameConverter converter;
-    disasm::Disassembler dasm(converter);
-    // use a reasonably large buffer
-    v8::internal::EmbeddedVector<char, 256> buffer;
-    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
-#ifdef V8_TARGET_ARCH_S390X
-    PrintF("%05ld  %08" V8PRIxPTR "  %s\n", icount_,
-           reinterpret_cast<intptr_t>(instr), buffer.start());
-#else
-    PrintF("%05lld  %08" V8PRIxPTR "  %s\n", icount_,
-           reinterpret_cast<intptr_t>(instr), buffer.start());
-#endif
-    // Flush stdout to prevent incomplete file output during abnormal exits
-    // This is caused by the output being buffered before being written to file
-    fflush(stdout);
-  }
-
-  // Try to simulate as S390 Instruction first.
-  bool processed = true;
-
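+// Legacy length-based decoder, retained for opcodes whose new table handlers
+// simply delegate to the old DecodeTwoByte/FourByte/SixByte paths.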
+int Simulator::DecodeInstructionOriginal(Instruction* instr) {
   int instrLength = instr->InstructionLength();
+  bool processed = true;
   if (instrLength == 2)
     processed = DecodeTwoByte(instr);
   else if (instrLength == 4)
     processed = DecodeFourByte(instr);
   else if (instrLength == 6)
     processed = DecodeSixByte(instr);
+  return instrLength;
+}
 
-  if (processed) {
-    if (!pc_modified_ && auto_incr_pc) {
-      set_pc(reinterpret_cast<intptr_t>(instr) + instrLength);
-    }
-    return;
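+// Table-driven decode: dispatch on the opcode and let the handler report the
+// instruction length.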
+int Simulator::DecodeInstruction(Instruction* instr) {
+  Opcode op = instr->S390OpcodeValue();
+  DCHECK(EvalTable[op] != NULL);
+  return (this->*EvalTable[op])(instr);
+}
+
+// Executes the current instruction.
+void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
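+  // Count every instruction here so EX-executed targets and debugger
+  // single-steps are included; the run loops no longer bump icount_.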
+  icount_++;
+
+  if (v8::internal::FLAG_check_icache) {
+    CheckICache(isolate_->simulator_i_cache(), instr);
   }
+
+  pc_modified_ = false;
+
+  if (::v8::internal::FLAG_trace_sim) {
+    disasm::NameConverter converter;
+    disasm::Disassembler dasm(converter);
+    // use a reasonably large buffer
+    v8::internal::EmbeddedVector<char, 256> buffer;
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+    PrintF("%05" PRId64 "  %08" V8PRIxPTR "  %s\n", icount_,
+           reinterpret_cast<intptr_t>(instr), buffer.start());
+
+    // Flush stdout to prevent incomplete file output during abnormal exits;
+    // this is caused by output being buffered before it is written to file.
+    fflush(stdout);
+  }
+
+  // Try to simulate as S390 Instruction first.
+  int length = DecodeInstruction(instr);
+
+  if (!pc_modified_ && auto_incr_pc) {
+    DCHECK(length == instr->InstructionLength());
+    set_pc(reinterpret_cast<intptr_t>(instr) + length);
+  }
+  return;
 }
 
 void Simulator::DebugStart() {
@@ -4860,7 +5634,6 @@
     // should be stopping at a particular executed instruction.
     while (program_counter != end_sim_pc) {
       Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
-      icount_++;
       ExecuteInstruction(instr);
       program_counter = get_pc();
     }
@@ -4869,7 +5642,6 @@
      // we reach the particular instruction count.
     while (program_counter != end_sim_pc) {
       Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
-      icount_++;
       if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
         S390Debugger dbg(this);
         dbg.Debug();
@@ -5121,6 +5893,2946 @@
   return address;
 }
 
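+// Each EVALUATE(name) handler decodes one instruction with a DECODE_* macro
+// (which also defines 'length') and returns the instruction length in bytes
+// so ExecuteInstruction() can advance the pc.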
+#define EVALUATE(name) \
+  int Simulator::Evaluate_##name(Instruction* instr)
+
+#define DCHECK_OPCODE(op) DCHECK(instr->S390OpcodeValue() == op)
+
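+// View the raw instruction bytes as the given instruction-format class.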
+#define AS(type) reinterpret_cast<type*>(instr)
+
+#define DECODE_RIL_A_INSTRUCTION(r1, i2)               \
+  int r1 = AS(RILInstruction)->R1Value();              \
+  uint32_t i2 = AS(RILInstruction)->I2UnsignedValue(); \
+  int length = 6;
+
+#define DECODE_RIL_B_INSTRUCTION(r1, i2)      \
+  int r1 = AS(RILInstruction)->R1Value();     \
+  int32_t i2 = AS(RILInstruction)->I2Value(); \
+  int length = 6;
+
+#define DECODE_RIL_C_INSTRUCTION(m1, ri2)                               \
+  Condition m1 = static_cast<Condition>(AS(RILInstruction)->R1Value()); \
+  uint64_t ri2 = AS(RILInstruction)->I2Value();                         \
+  int length = 6;
+
+#define DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2) \
+  int r1 = AS(RXYInstruction)->R1Value();        \
+  int x2 = AS(RXYInstruction)->X2Value();        \
+  int b2 = AS(RXYInstruction)->B2Value();        \
+  int d2 = AS(RXYInstruction)->D2Value();        \
+  int length = 6;
+
+#define DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val) \
+  int x2 = AS(RXInstruction)->X2Value();            \
+  int b2 = AS(RXInstruction)->B2Value();            \
+  int r1 = AS(RXInstruction)->R1Value();            \
+  intptr_t d2_val = AS(RXInstruction)->D2Value();   \
+  int length = 4;
+
+#define DECODE_RS_A_INSTRUCTION(r1, r3, b2, d2) \
+  int r3 = AS(RSInstruction)->R3Value();        \
+  int b2 = AS(RSInstruction)->B2Value();        \
+  int r1 = AS(RSInstruction)->R1Value();        \
+  intptr_t d2 = AS(RSInstruction)->D2Value();   \
+  int length = 4;
+
+#define DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2) \
+  int b2 = AS(RSInstruction)->B2Value();          \
+  int r1 = AS(RSInstruction)->R1Value();          \
+  int d2 = AS(RSInstruction)->D2Value();          \
+  int length = 4;
+
+#define DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val) \
+  int b1 = AS(SIInstruction)->B1Value();                   \
+  intptr_t d1_val = AS(SIInstruction)->D1Value();          \
+  uint8_t imm_val = AS(SIInstruction)->I2Value();          \
+  int length = 4;
+
+#define DECODE_RRE_INSTRUCTION(r1, r2)    \
+  int r1 = AS(RREInstruction)->R1Value(); \
+  int r2 = AS(RREInstruction)->R2Value(); \
+  int length = 4;
+
+#define DECODE_RR_INSTRUCTION(r1, r2)    \
+  int r1 = AS(RRInstruction)->R1Value(); \
+  int r2 = AS(RRInstruction)->R2Value(); \
+  int length = 2;
+
+#define DECODE_RIE_D_INSTRUCTION(r1, r2, i2)  \
+  int r1 = AS(RIEInstruction)->R1Value();     \
+  int r2 = AS(RIEInstruction)->R2Value();     \
+  int32_t i2 = AS(RIEInstruction)->I6Value(); \
+  int length = 6;
+
+#define DECODE_RIE_F_INSTRUCTION(r1, r2, i3, i4, i5) \
+  int r1 = AS(RIEInstruction)->R1Value();            \
+  int r2 = AS(RIEInstruction)->R2Value();            \
+  uint32_t i3 = AS(RIEInstruction)->I3Value();       \
+  uint32_t i4 = AS(RIEInstruction)->I4Value();       \
+  uint32_t i5 = AS(RIEInstruction)->I5Value();       \
+  int length = 6;
+
+#define DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2) \
+  int r1 = AS(RSYInstruction)->R1Value();        \
+  int r3 = AS(RSYInstruction)->R3Value();        \
+  int b2 = AS(RSYInstruction)->B2Value();        \
+  intptr_t d2 = AS(RSYInstruction)->D2Value();   \
+  int length = 6;
+
+#define DECODE_RI_A_INSTRUCTION(instr, r1, i2) \
+  int32_t r1 = AS(RIInstruction)->R1Value();   \
+  int16_t i2 = AS(RIInstruction)->I2Value();   \
+  int length = 4;
+
+#define DECODE_RI_B_INSTRUCTION(instr, r1, i2) \
+  int32_t r1 = AS(RILInstruction)->R1Value();  \
+  int16_t i2 = AS(RILInstruction)->I2Value();  \
+  int length = 4;
+
+#define DECODE_RI_C_INSTRUCTION(instr, m1, i2)                         \
+  Condition m1 = static_cast<Condition>(AS(RIInstruction)->R1Value()); \
+  int16_t i2 = AS(RIInstruction)->I2Value();                           \
+  int length = 4;
+
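+// Effective address is index + base + displacement; general register 0
+// contributes zero rather than its contents, per S390 addressing rules.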
+#define GET_ADDRESS(index_reg, base_reg, offset)        \
+  ((((index_reg) == 0) ? 0 : get_register(index_reg)) + \
+   (((base_reg) == 0) ? 0 : get_register(base_reg)) + (offset))
+
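+// Fallback for opcodes that have no dedicated handler in EvalTable.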
+int Simulator::Evaluate_Unknown(Instruction* instr) {
+  UNREACHABLE();
+  return 0;
+}
+
+EVALUATE(CLR) {
+  DCHECK_OPCODE(CLR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  SetS390ConditionCode<uint32_t>(r1_val, r2_val);
+  return length;
+}
+
+EVALUATE(LR) {
+  DCHECK_OPCODE(LR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  set_low_register(r1, get_low_register<int32_t>(r2));
+  return length;
+}
+
+EVALUATE(AR) {
+  DCHECK_OPCODE(AR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int32_t);
+  r1_val += r2_val;
+  SetS390ConditionCode<int32_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, r1_val);
+  return length;
+}
+
+EVALUATE(L) {
+  DCHECK_OPCODE(L);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int32_t mem_val = ReadW(addr, instr);
+  set_low_register(r1, mem_val);
+  return length;
+}
+
+EVALUATE(BRC) {
+  DCHECK_OPCODE(BRC);
+  DECODE_RI_C_INSTRUCTION(instr, m1, i2);
+
+  if (TestConditionCode(m1)) {
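+    // The branch immediate is a signed halfword count; scale it to bytes.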
+    intptr_t offset = 2 * i2;
+    set_pc(get_pc() + offset);
+  }
+  return length;
+}
+
+EVALUATE(AHI) {
+  DCHECK_OPCODE(AHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  bool isOF = CheckOverflowForIntAdd(r1_val, i2, int32_t);
+  r1_val += i2;
+  set_low_register(r1, r1_val);
+  SetS390ConditionCode<int32_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(AGHI) {
+  DCHECK_OPCODE(AGHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int64_t r1_val = get_register(r1);
+  bool isOF = false;
+  isOF = CheckOverflowForIntAdd(r1_val, i2, int64_t);
+  r1_val += i2;
+  set_register(r1, r1_val);
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(BRCL) {
+  DCHECK_OPCODE(BRCL);
+  DECODE_RIL_C_INSTRUCTION(m1, ri2);
+
+  if (TestConditionCode(m1)) {
+    intptr_t offset = 2 * ri2;
+    set_pc(get_pc() + offset);
+  }
+  return length;
+}
+
+EVALUATE(IIHF) {
+  DCHECK_OPCODE(IIHF);
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  set_high_register(r1, imm);
+  return length;
+}
+
+EVALUATE(IILF) {
+  DCHECK_OPCODE(IILF);
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  set_low_register(r1, imm);
+  return length;
+}
+
+EVALUATE(LGR) {
+  DCHECK_OPCODE(LGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  set_register(r1, get_register(r2));
+  return length;
+}
+
+EVALUATE(LG) {
+  DCHECK_OPCODE(LG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  intptr_t addr = GET_ADDRESS(x2, b2, d2);
+  int64_t mem_val = ReadDW(addr);
+  set_register(r1, mem_val);
+  return length;
+}
+
+EVALUATE(AGR) {
+  DCHECK_OPCODE(AGR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = get_register(r2);
+  bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+  r1_val += r2_val;
+  set_register(r1, r1_val);
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(LGFR) {
+  DCHECK_OPCODE(LGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int64_t result = static_cast<int64_t>(r2_val);
+  set_register(r1, result);
+
+  return length;
+}
+
+EVALUATE(LBR) {
+  DCHECK_OPCODE(LBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  r2_val <<= 24;
+  r2_val >>= 24;
+  set_low_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(LGBR) {
+  DCHECK_OPCODE(LGBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_low_register<int64_t>(r2);
+  r2_val <<= 56;
+  r2_val >>= 56;
+  set_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(LHR) {
+  DCHECK_OPCODE(LHR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  r2_val <<= 16;
+  r2_val >>= 16;
+  set_low_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(LGHR) {
+  DCHECK_OPCODE(LGHR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_low_register<int64_t>(r2);
+  r2_val <<= 48;
+  r2_val >>= 48;
+  set_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(LGF) {
+  DCHECK_OPCODE(LGF);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  intptr_t addr = GET_ADDRESS(x2, b2, d2);
+  int64_t mem_val = static_cast<int64_t>(ReadW(addr, instr));
+  set_register(r1, mem_val);
+  return length;
+}
+
+EVALUATE(ST) {
+  DCHECK_OPCODE(ST);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  WriteW(addr, r1_val, instr);
+  return length;
+}
+
+EVALUATE(STG) {
+  DCHECK_OPCODE(STG);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  intptr_t addr = GET_ADDRESS(x2, b2, d2);
+  uint64_t value = get_register(r1);
+  WriteDW(addr, value);
+  return length;
+}
+
+EVALUATE(STY) {
+  DCHECK_OPCODE(STY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  intptr_t addr = GET_ADDRESS(x2, b2, d2);
+  uint32_t value = get_low_register<uint32_t>(r1);
+  WriteW(addr, value, instr);
+  return length;
+}
+
+EVALUATE(LY) {
+  DCHECK_OPCODE(LY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  intptr_t addr = GET_ADDRESS(x2, b2, d2);
+  uint32_t mem_val = ReadWU(addr, instr);
+  set_low_register(r1, mem_val);
+  return length;
+}
+
+EVALUATE(LLGC) {
+  DCHECK_OPCODE(LLGC);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  uint8_t mem_val = ReadBU(GET_ADDRESS(x2, b2, d2));
+  set_register(r1, static_cast<uint64_t>(mem_val));
+  return length;
+}
+
+EVALUATE(LLC) {
+  DCHECK_OPCODE(LLC);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  uint8_t mem_val = ReadBU(GET_ADDRESS(x2, b2, d2));
+  set_low_register(r1, static_cast<uint32_t>(mem_val));
+  return length;
+}
+
+EVALUATE(RLL) {
+  DCHECK_OPCODE(RLL);
+  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // The rotate amount is the rightmost six bits of the second-operand
+  // address; a 32-bit rotate only depends on that amount mod 32.
+  int shiftBits = GET_ADDRESS(0, b2, d2) & 0x1F;
+  uint32_t r3_val = get_low_register<uint32_t>(r3);
+  uint32_t rotateBits = (shiftBits == 0) ? 0 : r3_val >> (32 - shiftBits);
+  uint32_t alu_out = (r3_val << shiftBits) | rotateBits;
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(RISBG) {
+  DCHECK_OPCODE(RISBG);
+  DECODE_RIE_F_INSTRUCTION(r1, r2, i3, i4, i5);
+  // Starting Bit Position is Bits 2-7 of I3 field
+  uint32_t start_bit = i3 & 0x3F;
+  // Ending Bit Position is Bits 2-7 of I4 field
+  uint32_t end_bit = i4 & 0x3F;
+  // Shift Amount is Bits 2-7 of I5 field
+  uint32_t shift_amount = i5 & 0x3F;
+  // Zero out the remaining (unselected) bits if Bit 0 of I4 is 1.
+  bool zero_remaining = (0 != (i4 & 0x80));
+
+  uint64_t src_val = get_register(r2);
+
+  // Rotate left by the shift amount first; guard the zero case to avoid an
+  // undefined 64-bit shift.
+  uint64_t rotated_val =
+      (shift_amount == 0)
+          ? src_val
+          : (src_val << shift_amount) | (src_val >> (64 - shift_amount));
+  int32_t width = end_bit - start_bit + 1;
+
+  uint64_t selection_mask = 0;
+  if (width < 64) {
+    selection_mask = (static_cast<uint64_t>(1) << width) - 1;
+  } else {
+    selection_mask = static_cast<uint64_t>(static_cast<int64_t>(-1));
+  }
+  selection_mask = selection_mask << (63 - end_bit);
+
+  uint64_t selected_val = rotated_val & selection_mask;
+
+  if (!zero_remaining) {
+    // Merge the unselected bits from the original value.
+    selected_val = (src_val & ~selection_mask) | selected_val;
+  }
+
+  // Condition code is set by treating result as 64-bit signed int
+  SetS390ConditionCode<int64_t>(selected_val, 0);
+  set_register(r1, selected_val);
+  return length;
+}
+
+EVALUATE(AHIK) {
+  DCHECK_OPCODE(AHIK);
+  DECODE_RIE_D_INSTRUCTION(r1, r2, i2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t imm = static_cast<int32_t>(i2);
+  bool isOF = CheckOverflowForIntAdd(r2_val, imm, int32_t);
+  set_low_register(r1, r2_val + imm);
+  SetS390ConditionCode<int32_t>(r2_val + imm, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(AGHIK) {
+  // 64-bit Add
+  DCHECK_OPCODE(AGHIK);
+  DECODE_RIE_D_INSTRUCTION(r1, r2, i2);
+  int64_t r2_val = get_register(r2);
+  int64_t imm = static_cast<int64_t>(i2);
+  bool isOF = CheckOverflowForIntAdd(r2_val, imm, int64_t);
+  set_register(r1, r2_val + imm);
+  SetS390ConditionCode<int64_t>(r2_val + imm, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(BKPT) {
+  DCHECK_OPCODE(BKPT);
+  set_pc(get_pc() + 2);
+  S390Debugger dbg(this);
+  dbg.Debug();
+  int length = 2;
+  return length;
+}
+
+EVALUATE(SPM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BALR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BCTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BCR) {
+  DCHECK_OPCODE(BCR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  if (TestConditionCode(Condition(r1))) {
+    intptr_t r2_val = get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+    // On 31-bit, the topmost bit may be 0 or 1 but is ignored by the
+    // hardware.  Cleanse it before jumping to the target, unless the target
+    // is one of the special PCs.
+    if (r2_val != bad_lr && r2_val != end_sim_pc) r2_val &= 0x7FFFFFFF;
+#endif
+    set_pc(r2_val);
+  }
+
+  return length;
+}
+
+EVALUATE(SVC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BSM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BASSM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BASR) {
+  DCHECK_OPCODE(BASR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  intptr_t link_addr = get_pc() + 2;
+  // If R2 is zero, the BASR does not branch.
+  int64_t r2_val = (r2 == 0) ? link_addr : get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+  // On 31-bit, the topmost bit may be 0 or 1, which can confuse the stack
+  // walker.  The top bit should either be cleansed before the address is
+  // pushed onto the stack, or during stack walking when it is dereferenced.
+  // In the simulator we take the worst case and always tag the high bit,
+  // to flush out more problems.
+  link_addr |= 0x80000000;
+#endif
+  set_register(r1, link_addr);
+  set_pc(r2_val);
+  return length;
+}
+
+EVALUATE(MVCL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLCL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LNR) {
+  DCHECK_OPCODE(LNR);
+  // Load Negative (32)
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  r2_val = (r2_val >= 0) ? -r2_val : r2_val;  // If positive, negate it.
+  set_low_register(r1, r2_val);
+  // CC0 - result is zero; CC1 - result is negative.
+  condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT;
+  return length;
+}
+
+EVALUATE(LTR) {
+  DCHECK_OPCODE(LTR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  SetS390ConditionCode<int32_t>(r2_val, 0);
+  set_low_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(LCR) {
+  DCHECK_OPCODE(LCR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t original_r2_val = r2_val;
+  r2_val = ~r2_val;
+  r2_val = r2_val + 1;
+  set_low_register(r1, r2_val);
+  SetS390ConditionCode<int32_t>(r2_val, 0);
+  // Checks for overflow where r2_val = -2147483648.
+  // Cannot do int comparison due to GCC 4.8 bug on x86.
+  // Detect INT_MIN alternatively, as it is the only value where both
+  // original and result are negative due to overflow.
+  if (r2_val < 0 && original_r2_val < 0) {
+    SetS390OverflowCode(true);
+  }
+  return length;
+}
+
+EVALUATE(NR) {
+  DCHECK_OPCODE(NR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  r1_val &= r2_val;
+  SetS390BitWiseConditionCode<uint32_t>(r1_val);
+  set_low_register(r1, r1_val);
+  return length;
+}
+
+EVALUATE(OR) {
+  DCHECK_OPCODE(OR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  r1_val |= r2_val;
+  SetS390BitWiseConditionCode<uint32_t>(r1_val);
+  set_low_register(r1, r1_val);
+  return length;
+}
+
+EVALUATE(XR) {
+  DCHECK_OPCODE(XR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  r1_val ^= r2_val;
+  SetS390BitWiseConditionCode<uint32_t>(r1_val);
+  set_low_register(r1, r1_val);
+  return length;
+}
+
+EVALUATE(CR) {
+  DCHECK_OPCODE(CR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  SetS390ConditionCode<int32_t>(r1_val, r2_val);
+  return length;
+}
+
+EVALUATE(SR) {
+  DCHECK_OPCODE(SR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  bool isOF = false;
+  isOF = CheckOverflowForIntSub(r1_val, r2_val, int32_t);
+  r1_val -= r2_val;
+  SetS390ConditionCode<int32_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, r1_val);
+  return length;
+}
+
+EVALUATE(MR) {
+  DCHECK_OPCODE(MR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  DCHECK(r1 % 2 == 0);
+  // The multiplicand is the odd register of the even-odd pair; the 64-bit
+  // product is written back across the pair (high word in r1, low in r1+1).
+  int32_t r1_val = get_low_register<int32_t>(r1 + 1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int64_t product =
+      static_cast<int64_t>(r1_val) * static_cast<int64_t>(r2_val);
+  int32_t high_bits = product >> 32;
+  int32_t low_bits = product & 0x00000000FFFFFFFF;
+  set_low_register(r1, high_bits);
+  set_low_register(r1 + 1, low_bits);
+  return length;
+}
+
+EVALUATE(DR) {
+  DCHECK_OPCODE(DR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  // reg-reg pair should be even-odd pair, assert r1 is an even register
+  DCHECK(r1 % 2 == 0);
+  // leftmost 32 bits of the dividend are in r1
+  // rightmost 32 bits of the dividend are in r1+1
+  // get the signed value from r1
+  int64_t dividend = static_cast<int64_t>(r1_val) << 32;
+  // get unsigned value from r1+1
+  // avoid addition with sign-extended r1+1 value
+  dividend += get_low_register<uint32_t>(r1 + 1);
+  int32_t remainder = dividend % r2_val;
+  int32_t quotient = dividend / r2_val;
+  set_low_register(r1, remainder);
+  set_low_register(r1 + 1, quotient);
+  return length;
+}
+
+EVALUATE(ALR) {
+  DCHECK_OPCODE(ALR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  uint32_t alu_out = 0;
+  bool isOF = false;
+  alu_out = r1_val + r2_val;
+  isOF = CheckOverflowForUIntAdd(r1_val, r2_val);
+  set_low_register(r1, alu_out);
+  SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+  return length;
+}
+
+EVALUATE(SLR) {
+  DCHECK_OPCODE(SLR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  uint32_t alu_out = 0;
+  bool isOF = false;
+  alu_out = r1_val - r2_val;
+  isOF = CheckOverflowForUIntSub(r1_val, r2_val);
+  set_low_register(r1, alu_out);
+  SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+  return length;
+}
+
+EVALUATE(LDR) {
+  DCHECK_OPCODE(LDR);
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_d_register(r2);
+  set_d_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(CDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LER) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STH) {
+  DCHECK_OPCODE(STH);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int16_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t mem_addr = b2_val + x2_val + d2_val;
+  WriteH(mem_addr, r1_val, instr);
+
+  return length;
+}
+
+EVALUATE(LA) {
+  DCHECK_OPCODE(LA);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  set_register(r1, addr);
+  return length;
+}
+
+EVALUATE(STC) {
+  DCHECK_OPCODE(STC);
+  // Store Character/Byte
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  uint8_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t mem_addr = b2_val + x2_val + d2_val;
+  WriteB(mem_addr, r1_val);
+  return length;
+}
+
+EVALUATE(IC_z) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EX) {
+  DCHECK_OPCODE(EX);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+
+  SixByteInstr the_instr = Instruction::InstructionBits(
+      reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+  int inst_length = Instruction::InstructionLength(
+      reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+
+  char new_instr_buf[8];
+  char* addr = reinterpret_cast<char*>(&new_instr_buf[0]);
+  the_instr |= static_cast<SixByteInstr>(r1_val & 0xff)
+               << (8 * inst_length - 16);
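+  // The low byte of r1 is OR'ed into bits 8-15 (the second byte) of the
+  // copied instruction; e.g. for a 4-byte target the shift above is
+  // 8 * 4 - 16 = 16 bit positions.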
+  Instruction::SetInstructionBits<SixByteInstr>(
+      reinterpret_cast<byte*>(addr), static_cast<SixByteInstr>(the_instr));
+  ExecuteInstruction(reinterpret_cast<Instruction*>(addr), false);
+  return length;
+}
+
+EVALUATE(BAL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BCT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LH) {
+  DCHECK_OPCODE(LH);
+  // Load Halfword
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t mem_addr = x2_val + b2_val + d2_val;
+
+  int32_t result = static_cast<int32_t>(ReadH(mem_addr, instr));
+  set_low_register(r1, result);
+  return length;
+}
+
+EVALUATE(CH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AH) {
+  DCHECK_OPCODE(AH);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+  int32_t alu_out = 0;
+  bool isOF = false;
+  isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+  alu_out = r1_val + mem_val;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+
+  return length;
+}
+
+EVALUATE(SH) {
+  DCHECK_OPCODE(SH);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+  int32_t alu_out = 0;
+  bool isOF = false;
+  isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+  alu_out = r1_val - mem_val;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+
+  return length;
+}
+
+EVALUATE(MH) {
+  DCHECK_OPCODE(MH);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+  int32_t alu_out = 0;
+  alu_out = r1_val * mem_val;
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(BAS) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CVD) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CVB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(N) {
+  DCHECK_OPCODE(N);
+  // 32-bit Reg-Mem instructions
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+  int32_t alu_out = 0;
+  alu_out = r1_val & mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(CL) {
+  DCHECK_OPCODE(CL);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int32_t mem_val = ReadW(addr, instr);
+  SetS390ConditionCode<uint32_t>(r1_val, mem_val);
+  return length;
+}
+
+EVALUATE(O) {
+  DCHECK_OPCODE(O);
+  // 32-bit Reg-Mem instructions
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+  int32_t alu_out = 0;
+  alu_out = r1_val | mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(X) {
+  DCHECK_OPCODE(X);
+  // 32-bit Reg-Mem instructions
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+  int32_t alu_out = 0;
+  alu_out = r1_val ^ mem_val;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(C) {
+  DCHECK_OPCODE(C);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int32_t mem_val = ReadW(addr, instr);
+  SetS390ConditionCode<int32_t>(r1_val, mem_val);
+  return length;
+}
+
+EVALUATE(A) {
+  DCHECK_OPCODE(A);
+  // 32-bit Reg-Mem instructions
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+  int32_t alu_out = 0;
+  bool isOF = false;
+  isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+  alu_out = r1_val + mem_val;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(S) {
+  DCHECK_OPCODE(S);
+  // 32-bit Reg-Mem instructions
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+  int32_t alu_out = 0;
+  bool isOF = false;
+  isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+  alu_out = r1_val - mem_val;
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(M) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(D) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STD) {
+  DCHECK_OPCODE(STD);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int64_t frs_val = get_d_register(r1);
+  WriteDW(addr, frs_val);
+  return length;
+}
+
+EVALUATE(LD) {
+  DCHECK_OPCODE(LD);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int64_t dbl_val = *reinterpret_cast<int64_t*>(addr);
+  set_d_register(r1, dbl_val);
+  return length;
+}
+
+EVALUATE(CD) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STE) {
+  DCHECK_OPCODE(STE);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  int64_t frs_val = get_d_register(r1) >> 32;
+  WriteW(addr, static_cast<int32_t>(frs_val), instr);
+  return length;
+}
+
+EVALUATE(MS) {
+  DCHECK_OPCODE(MS);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  set_low_register(r1, r1_val * mem_val);
+  return length;
+}
+
+EVALUATE(LE) {
+  DCHECK_OPCODE(LE);
+  DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t addr = b2_val + x2_val + d2_val;
+  float float_val = *reinterpret_cast<float*>(addr);
+  set_d_register_from_float32(r1, float_val);
+  return length;
+}
+
+EVALUATE(BRXH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BRXLE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BXH) {
+  DCHECK_OPCODE(BXH);
+  DECODE_RS_A_INSTRUCTION(r1, r3, b2, d2);
+
+  // r1_val is the first operand, r3_val is the increment
+  int32_t r1_val = r1 == 0 ? 0 : get_register(r1);
+  int32_t r3_val = r3 == 0 ? 0 : get_register(r3);
+  intptr_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  intptr_t branch_address = b2_val + d2;
+  // increment r1_val
+  r1_val += r3_val;
+
+  // If r3 is even, it designates an even-odd register pair: the even
+  // register supplies the increment and the odd register the compare value.
+  // If r3 is odd, its contents serve as both increment and compare value.
+  int32_t compare_val = r3 % 2 == 0 ? get_register(r3 + 1) : r3_val;
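+  // Illustrative: with r3 = 4 the increment comes from GPR4 and the compare
+  // value from GPR5; with r3 = 5, GPR5 supplies both.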
+  if (r1_val > compare_val) {
+    // branch to address if r1_val is greater than compare value
+    set_pc(branch_address);
+  }
+
+  // update contents of register in r1 with the new incremented value
+  set_register(r1, r1_val);
+
+  return length;
+}
+
+EVALUATE(BXLE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRL) {
+  DCHECK_OPCODE(SRL);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // Only the rightmost 6 bits are used as the shift amount.
+  int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t alu_out = 0;
+  alu_out = r1_val >> shiftBits;
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(SLL) {
+  DCHECK_OPCODE(SLL);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // Only the rightmost 6 bits are used as the shift amount.
+  int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t alu_out = 0;
+  alu_out = r1_val << shiftBits;
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(SRA) {
+  DCHECK_OPCODE(SRA);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // Only the rightmost 6 bits are used as the shift amount.
+  int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t alu_out = 0;
+  bool isOF = false;
+  alu_out = r1_val >> shiftBits;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(SLA) {
+  DCHECK_OPCODE(SLA);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // Only the rightmost 6 bits are used as the shift amount.
+  int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t alu_out = 0;
+  bool isOF = false;
+  isOF = CheckOverflowForShiftLeft(r1_val, shiftBits);
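+  // Illustrative: shifting 0x40000000 left by one would flip the sign bit;
+  // that is the kind of case CheckOverflowForShiftLeft is expected to flag.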
+  alu_out = r1_val << shiftBits;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(SRDL) {
+  DCHECK_OPCODE(SRDL);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  DCHECK(r1 % 2 == 0);  // must be a reg pair
+  // Only the rightmost 6 bits are used as the shift amount.
+  int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  uint64_t opnd1 = static_cast<uint64_t>(get_low_register<uint32_t>(r1)) << 32;
+  uint64_t opnd2 = static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+  uint64_t r1_val = opnd1 | opnd2;
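+  // Illustrative: GPR(r1) = 0x00000001 and GPR(r1 + 1) = 0x80000000 form
+  // 0x0000000180000000; shifting right by 1 gives 0x00000000C0000000.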
+  uint64_t alu_out = r1_val >> shiftBits;
+  set_low_register(r1, alu_out >> 32);
+  set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  return length;
+}
+
+EVALUATE(SLDL) {
+  DCHECK_OPCODE(SLDL);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // Only the rightmost 6 bits are used as the shift amount.
+  int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+
+  DCHECK(r1 % 2 == 0);
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  uint32_t r1_next_val = get_low_register<uint32_t>(r1 + 1);
+  uint64_t alu_out = (static_cast<uint64_t>(r1_val) << 32) |
+                     (static_cast<uint64_t>(r1_next_val));
+  alu_out <<= shiftBits;
+  set_low_register(r1 + 1, static_cast<uint32_t>(alu_out));
+  set_low_register(r1, static_cast<uint32_t>(alu_out >> 32));
+  return length;
+}
+
+EVALUATE(SRDA) {
+  DCHECK_OPCODE(SRDA);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  DCHECK(r1 % 2 == 0);  // must be a reg pair
+  // Only the rightmost 6 bits are used as the shift amount.
+  int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+  int shiftBits = (b2_val + d2) & 0x3F;
+  int64_t opnd1 = static_cast<int64_t>(get_low_register<int32_t>(r1)) << 32;
+  int64_t opnd2 = static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+  int64_t r1_val = opnd1 + opnd2;
+  int64_t alu_out = r1_val >> shiftBits;
+  set_low_register(r1, alu_out >> 32);
+  set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+  // The CC reflects the full 64-bit shifted result.
+  SetS390ConditionCode<int64_t>(alu_out, 0);
+  return length;
+}
+
+EVALUATE(SLDA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STM) {
+  DCHECK_OPCODE(STM);
+  DECODE_RS_A_INSTRUCTION(r1, r3, rb, d2);
+  // Store Multiple 32-bits.
+  int offset = d2;
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs stored properly.
+  if (r3 < r1) r3 += 16;
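+  // Illustrative: r1 = 14, r3 = 1 wraps so that GPR14, GPR15, GPR0 and GPR1
+  // are stored.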
+
+  int32_t rb_val = (rb == 0) ? 0 : get_low_register<int32_t>(rb);
+
+  // Store each register in ascending order.
+  for (int i = 0; i <= r3 - r1; i++) {
+    int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+    WriteW(rb_val + offset + 4 * i, value, instr);
+  }
+  return length;
+}
+
+EVALUATE(TM) {
+  DCHECK_OPCODE(TM);
+  // Test Under Mask (Mem - Imm) (8)
+  DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
+  int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t addr = b1_val + d1_val;
+  uint8_t mem_val = ReadB(addr);
+  uint8_t selected_bits = mem_val & imm_val;
+  // CC0: Selected bits are zero
+  // CC1: Selected bits mixed zeros and ones
+  // CC3: Selected bits all ones
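+  // Illustrative: mem_val = 0xA0 with imm_val = 0xF0 selects 0xA0, which is
+  // neither zero nor the full mask, so CC1 is set.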
+  if (0 == selected_bits) {
+    condition_reg_ = CC_EQ;  // CC0
+  } else if (selected_bits == imm_val) {
+    condition_reg_ = 0x1;  // CC3
+  } else {
+    condition_reg_ = 0x4;  // CC1
+  }
+  return length;
+}
+
+EVALUATE(MVI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TS) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLI) {
+  DCHECK_OPCODE(CLI);
+  // Compare Logical Immediate (Mem - Imm) (8)
+  DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
+  int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t addr = b1_val + d1_val;
+  uint8_t mem_val = ReadB(addr);
+  SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+  return length;
+}
+
+EVALUATE(OI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LM) {
+  DCHECK_OPCODE(LM);
+  DECODE_RS_A_INSTRUCTION(r1, r3, rb, d2);
+  // Load Multiple 32-bits.
+  int offset = d2;
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs loaded properly.
+  if (r3 < r1) r3 += 16;
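+  // Illustrative: r1 = 15, r3 = 0 wraps to load GPR15 then GPR0.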
+
+  int32_t rb_val = (rb == 0) ? 0 : get_low_register<int32_t>(rb);
+
+  // Load each register in ascending order.
+  for (int i = 0; i <= r3 - r1; i++) {
+    int32_t value = ReadW(rb_val + offset + 4 * i, instr);
+    set_low_register((r1 + i) % 16, value);
+  }
+  return length;
+}
+
+EVALUATE(MVCLE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLCLE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDS) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ICM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BPRP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BPP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVN) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVC) {
+  DCHECK_OPCODE(MVC);
+  // Move Character
+  SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+  int b1 = ssInstr->B1Value();
+  intptr_t d1 = ssInstr->D1Value();
+  int b2 = ssInstr->B2Value();
+  intptr_t d2 = ssInstr->D2Value();
+  // The L field encodes one less than the number of bytes to move.
+  int num_bytes = ssInstr->Length() + 1;
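+  // Illustrative: an encoded L of 3 moves 3 + 1 = 4 bytes.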
+  int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  intptr_t src_addr = b2_val + d2;
+  intptr_t dst_addr = b1_val + d1;
+  for (int i = 0; i < num_bytes; ++i) {
+    WriteB(dst_addr++, ReadB(src_addr++));
+  }
+  // MVC is a 6-byte SS-format instruction.
+  return 6;
+}
+
+EVALUATE(MVZ) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVCP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ED) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EDMK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PKU) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(UNPKU) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVCIN) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PKA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(UNPKA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PLO) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LMD) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVO) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PACK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(UNPK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ZAP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(UPT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PFPO) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(IIHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(IIHL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(IILH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(IILL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NIHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NIHL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NILH) {
+  DCHECK_OPCODE(NILH);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  // CC is set based on the 16 bits that are AND'd
+  SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) & i);
+  // Build the 32-bit mask explicitly so the 16-bit immediate is neither
+  // truncated nor sign-extended.
+  int32_t mask = static_cast<int32_t>((static_cast<uint32_t>(i) << 16) | 0x0000FFFF);
+  set_low_register(r1, r1_val & mask);
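+  // Illustrative: with i = 0x00FF the mask is 0x00FFFFFF, turning
+  // 0x12345678 into 0x00345678.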
+  return length;
+}
+
+EVALUATE(NILL) {
+  DCHECK_OPCODE(NILL);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  // CC is set based on the 16 bits that are AND'd
+  SetS390BitWiseConditionCode<uint16_t>(r1_val & i);
+  // Preserve the upper halfword regardless of the immediate's signedness.
+  int32_t mask = static_cast<int32_t>(0xFFFF0000 | (i & 0x0000FFFF));
+  set_low_register(r1, r1_val & mask);
+  return length;
+}
+
+EVALUATE(OIHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OIHL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OILH) {
+  DCHECK_OPCODE(OILH);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  // CC is set based on the 16 bits that are OR'd
+  SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) | i);
+  // Shift the immediate into the high halfword, leaving the low halfword
+  // untouched.
+  int32_t val = static_cast<int32_t>(static_cast<uint32_t>(i) << 16);
+  set_low_register(r1, r1_val | val);
+  return length;
+}
+
+EVALUATE(OILL) {
+  DCHECK_OPCODE(OILL);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  // CC is set based on the 16 bits that are OR'd
+  SetS390BitWiseConditionCode<uint16_t>(r1_val | i);
+  // Mask the immediate to its low 16 bits so sign extension cannot set bits
+  // in the upper halfword.
+  set_low_register(r1, r1_val | (i & 0x0000FFFF));
+  return length;
+}
+
+EVALUATE(LLIHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLIHL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLILH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLILL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TMLH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TMLL) {
+  DCHECK_OPCODE(TMLL);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int mask = i2 & 0x0000FFFF;
+  if (mask == 0) {
+    // A zero mask selects no bits, so all selected bits are (vacuously)
+    // zero: CC0.
+    condition_reg_ = 0x8;
+    return length;
+  }
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  r1_val = r1_val & 0x0000FFFF;  // use only the low 16 bits
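+  // Illustrative: mask = 0x00F0 with r1_val = 0x0050 selects mixed bits whose
+  // leftmost (bit 7) is zero, so the checks below end with CC 0x4.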
+
+  // Test if all selected bits are zero.
+  bool allSelectedBitsAreZeros = true;
+  for (int i = 0; i < 16; i++) {
+    if (mask & (1 << i)) {
+      if (r1_val & (1 << i)) {
+        allSelectedBitsAreZeros = false;
+        break;
+      }
+    }
+  }
+  if (allSelectedBitsAreZeros) {
+    condition_reg_ = 0x8;
+    return length;  // Done!
+  }
+
+  // Test if all selected bits are one.
+  bool allSelectedBitsAreOnes = true;
+  for (int i = 0; i < 16; i++) {
+    if (mask & (1 << i)) {
+      if (!(r1_val & (1 << i))) {
+        allSelectedBitsAreOnes = false;
+        break;
+      }
+    }
+  }
+  if (allSelectedBitsAreOnes) {
+    condition_reg_ = 0x1;
+    return length;  // Done!
+  }
+
+  // The selected bits are now known to be a mix of zeros and ones.
+  // Test whether the leftmost selected bit is zero or one.
+  for (int i = 15; i >= 0; i--) {
+    if (mask & (1 << i)) {
+      if (r1_val & (1 << i)) {
+        // leftmost bit is one
+        condition_reg_ = 0x2;
+      } else {
+        // leftmost bit is zero
+        condition_reg_ = 0x4;
+      }
+      return length;  // Done!
+    }
+  }
+  return length;
+}
+
+EVALUATE(TMHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TMHL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BRAS) {
+  DCHECK_OPCODE(BRAS);
+  // Branch Relative and Save
+  DECODE_RI_B_INSTRUCTION(instr, r1, d2)
+  intptr_t pc = get_pc();
+  // Save the address of the next instruction in r1
+  set_register(r1, pc + sizeof(FourByteInstr));
+  // Update PC to branch target
+  set_pc(pc + d2 * 2);
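+  // Illustrative: d2 counts halfwords, so d2 = 0x100 branches to pc + 0x200
+  // while r1 receives pc + 4.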
+  return length;
+}
+
+EVALUATE(BRCT) {
+  DCHECK_OPCODE(BRCT);
+  // Branch On Count (32).
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int64_t value = get_low_register<int32_t>(r1);
+  set_low_register(r1, --value);
+  // Branch if value != 0
+  if (value != 0) {
+    intptr_t offset = i2 * 2;
+    set_pc(get_pc() + offset);
+  }
+  return length;
+}
+
+EVALUATE(BRCTG) {
+  DCHECK_OPCODE(BRCTG);
+  // Branch On Count (64).
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int64_t value = get_register(r1);
+  set_register(r1, --value);
+  // Branch if value != 0
+  if (value != 0) {
+    intptr_t offset = i2 * 2;
+    set_pc(get_pc() + offset);
+  }
+  return length;
+}
+
+EVALUATE(LHI) {
+  DCHECK_OPCODE(LHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i);
+  set_low_register(r1, i);
+  return length;
+}
+
+EVALUATE(LGHI) {
+  DCHECK_OPCODE(LGHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int64_t i = static_cast<int64_t>(i2);
+  set_register(r1, i);
+  return length;
+}
+
+EVALUATE(MHI) {
+  DCHECK_OPCODE(MHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  bool isOF = false;
+  isOF = CheckOverflowForMul(r1_val, i);
+  r1_val *= i;
+  set_low_register(r1, r1_val);
+  SetS390ConditionCode<int32_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(MGHI) {
+  DCHECK_OPCODE(MGHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int64_t i = static_cast<int64_t>(i2);
+  int64_t r1_val = get_register(r1);
+  bool isOF = false;
+  isOF = CheckOverflowForMul(r1_val, i);
+  r1_val *= i;
+  set_register(r1, r1_val);
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(CHI) {
+  DCHECK_OPCODE(CHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  SetS390ConditionCode<int32_t>(r1_val, i);
+  return length;
+}
+
+EVALUATE(CGHI) {
+  DCHECK_OPCODE(CGHI);
+  DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+  int64_t i = static_cast<int64_t>(i2);
+  int64_t r1_val = get_register(r1);
+  SetS390ConditionCode<int64_t>(r1_val, i);
+  return length;
+}
+
+EVALUATE(LARL) {
+  DCHECK_OPCODE(LARL);
+  DECODE_RIL_B_INSTRUCTION(r1, i2);
+  intptr_t offset = i2 * 2;
+  set_register(r1, get_pc() + offset);
+  return length;
+}
+
+EVALUATE(LGFI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BRASL) {
+  DCHECK_OPCODE(BRASL);
+  // Branch and Save Relative Long
+  DECODE_RIL_B_INSTRUCTION(r1, i2);
+  intptr_t d2 = i2;
+  intptr_t pc = get_pc();
+  set_register(r1, pc + 6);  // save the address of the next instruction
+  set_pc(pc + d2 * 2);       // update the PC to the branch target
+  return length;
+}
+
+EVALUATE(XIHF) {
+  DCHECK_OPCODE(XIHF);
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  uint32_t alu_out = 0;
+  alu_out = get_high_register<uint32_t>(r1);
+  alu_out = alu_out ^ imm;
+  set_high_register(r1, alu_out);
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  return length;
+}
+
+EVALUATE(XILF) {
+  DCHECK_OPCODE(XILF);
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  uint32_t alu_out = 0;
+  alu_out = get_low_register<uint32_t>(r1);
+  alu_out = alu_out ^ imm;
+  set_low_register(r1, alu_out);
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  return length;
+}
+
+EVALUATE(NIHF) {
+  DCHECK_OPCODE(NIHF);
+  // Bitwise Op on upper 32-bits
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  uint32_t alu_out = get_high_register<uint32_t>(r1);
+  alu_out &= imm;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_high_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(NILF) {
+  DCHECK_OPCODE(NILF);
+  // Bitwise Op on lower 32-bits
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  uint32_t alu_out = get_low_register<uint32_t>(r1);
+  alu_out &= imm;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(OIHF) {
+  DCHECK_OPCODE(OIHF);
+  // Bitwise Op on upper 32-bits
+  DECODE_RIL_B_INSTRUCTION(r1, imm);
+  uint32_t alu_out = get_high_register<uint32_t>(r1);
+  alu_out |= imm;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_high_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(OILF) {
+  DCHECK_OPCODE(OILF);
+  // Bitwise Op on lower 32-bits
+  DECODE_RIL_B_INSTRUCTION(r1, imm);
+  uint32_t alu_out = get_low_register<uint32_t>(r1);
+  alu_out |= imm;
+  SetS390BitWiseConditionCode<uint32_t>(alu_out);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(LLIHF) {
+  DCHECK_OPCODE(LLIHF);
+  // Load Logical Immediate into high word
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  uint64_t imm = static_cast<uint64_t>(i2);
+  set_register(r1, imm << 32);
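+  // Illustrative: i2 = 0x12345678 puts 0x1234567800000000 in r1.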
+  return length;
+}
+
+EVALUATE(LLILF) {
+  DCHECK_OPCODE(LLILF);
+  // Load Logical into lower 32-bits (zero extend upper 32-bits)
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  uint64_t imm = static_cast<uint64_t>(i2);
+  set_register(r1, imm);
+  return length;
+}
+
+EVALUATE(MSGFI) {
+  DCHECK_OPCODE(MSGFI);
+  DECODE_RIL_B_INSTRUCTION(r1, i2);
+  int64_t alu_out = get_register(r1);
+  alu_out = alu_out * i2;
+  set_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(MSFI) {
+  DCHECK_OPCODE(MSFI);
+  DECODE_RIL_B_INSTRUCTION(r1, i2);
+  int32_t alu_out = get_low_register<int32_t>(r1);
+  alu_out = alu_out * i2;
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(SLGFI) {
+  DCHECK_OPCODE(SLGFI);
+#ifndef V8_TARGET_ARCH_S390X
+  // should only be called on 64bit
+  DCHECK(false);
+#endif
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  uint64_t r1_val = static_cast<uint64_t>(get_register(r1));
+  uint64_t alu_out = r1_val - i2;
+  set_register(r1, static_cast<intptr_t>(alu_out));
+  SetS390ConditionCode<uint64_t>(alu_out, 0);
+  return length;
+}
+
+EVALUATE(SLFI) {
+  DCHECK_OPCODE(SLFI);
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  uint32_t alu_out = get_low_register<uint32_t>(r1);
+  alu_out -= imm;
+  SetS390ConditionCode<uint32_t>(alu_out, 0);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(AGFI) {
+  DCHECK_OPCODE(AGFI);
+  // Clobbering Add Word Immediate
+  DECODE_RIL_B_INSTRUCTION(r1, i2_val);
+  bool isOF = false;
+  // 64-bit Add (Register + 32-bit Imm)
+  int64_t r1_val = get_register(r1);
+  int64_t i2 = static_cast<int64_t>(i2_val);
+  isOF = CheckOverflowForIntAdd(r1_val, i2, int64_t);
+  int64_t alu_out = r1_val + i2;
+  set_register(r1, alu_out);
+  SetS390ConditionCode<int64_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(AFI) {
+  DCHECK_OPCODE(AFI);
+  // Clobbering Add Word Immediate
+  DECODE_RIL_B_INSTRUCTION(r1, i2);
+  bool isOF = false;
+  // 32-bit Add (Register + 32-bit Immediate)
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  isOF = CheckOverflowForIntAdd(r1_val, i2, int32_t);
+  int32_t alu_out = r1_val + i2;
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+  return length;
+}
+
+EVALUATE(ALGFI) {
+  DCHECK_OPCODE(ALGFI);
+#ifndef V8_TARGET_ARCH_S390X
+  // should only be called on 64bit
+  DCHECK(false);
+#endif
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  uint64_t r1_val = static_cast<uint64_t>(get_register(r1));
+  uint64_t alu_out = r1_val + i2;
+  set_register(r1, static_cast<intptr_t>(alu_out));
+  SetS390ConditionCode<uint64_t>(alu_out, 0);
+
+  return length;
+}
+
+EVALUATE(ALFI) {
+  DCHECK_OPCODE(ALFI);
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  uint32_t alu_out = get_low_register<uint32_t>(r1);
+  alu_out += imm;
+  SetS390ConditionCode<uint32_t>(alu_out, 0);
+  set_low_register(r1, alu_out);
+  return length;
+}
+
+EVALUATE(CGFI) {
+  DCHECK_OPCODE(CGFI);
+  // Compare with Immediate (64)
+  DECODE_RIL_B_INSTRUCTION(r1, i2);
+  int64_t imm = static_cast<int64_t>(i2);
+  SetS390ConditionCode<int64_t>(get_register(r1), imm);
+  return length;
+}
+
+EVALUATE(CFI) {
+  DCHECK_OPCODE(CFI);
+  // Compare with Immediate (32)
+  DECODE_RIL_B_INSTRUCTION(r1, imm);
+  SetS390ConditionCode<int32_t>(get_low_register<int32_t>(r1), imm);
+  return length;
+}
+
+EVALUATE(CLGFI) {
+  DCHECK_OPCODE(CLGFI);
+  // Compare Logical with Immediate (64)
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  uint64_t imm = static_cast<uint64_t>(i2);
+  SetS390ConditionCode<uint64_t>(get_register(r1), imm);
+  return length;
+}
+
+EVALUATE(CLFI) {
+  DCHECK_OPCODE(CLFI);
+  // Compare Logical with Immediate (32)
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  SetS390ConditionCode<uint32_t>(get_low_register<uint32_t>(r1), imm);
+  return length;
+}
+
+EVALUATE(LLHRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LGHRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LHRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGHRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STHRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LGRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STGRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LGFRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGFRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EXRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PFDRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGHRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CHRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGFRL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ECTG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CSST) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPD) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPDG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BRCTH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AIH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALSIH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALSIHN) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CIH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(IPM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(HSCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SSCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STSCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TSCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TPI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SAL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RSCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCRW) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCPS) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RCHP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SCHM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CKSM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SAR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EAR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSR) {
+  DCHECK_OPCODE(MSR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t r1_val = get_low_register<int32_t>(r1);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  set_low_register(r1, r1_val * r2_val);
+  return length;
+}
+
+EVALUATE(MVST) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CUSE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRST) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XSCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCKE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCKF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRNM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STFPC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LFPC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CUUTF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CUTFU) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STFLE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRNMB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRNMT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LFAS) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PPA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ETND) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TEND) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NIAI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TABORT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRAP4) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPEBR) {
+  DCHECK_OPCODE(LPEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fr2_val = get_float32_from_d_register(r2);
+  float fr1_val = std::fabs(fr2_val);
+  set_d_register_from_float32(r1, fr1_val);
+  if (fr2_val != fr2_val) {  // input is NaN
+    condition_reg_ = CC_OF;
+  } else if (fr2_val == 0) {
+    condition_reg_ = CC_EQ;
+  } else {
+    condition_reg_ = CC_GT;
+  }
+
+  return length;
+}
+
+EVALUATE(LNEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTEBR) {
+  DCHECK_OPCODE(LTEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_d_register(r2);
+  float fr2_val = get_float32_from_d_register(r2);
+  SetS390ConditionCode<float>(fr2_val, 0.0);
+  set_d_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(LCEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LDEBR) {
+  DCHECK_OPCODE(LDEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fp_val = get_float32_from_d_register(r2);
+  double db_val = static_cast<double>(fp_val);
+  set_d_register_from_double(r1, db_val);
+  return length;
+}
+
+EVALUATE(LXDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LXEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MXDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CEBR) {
+  DCHECK_OPCODE(CEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fr1_val = get_float32_from_d_register(r1);
+  float fr2_val = get_float32_from_d_register(r2);
+  if (isNaN(fr1_val) || isNaN(fr2_val)) {
+    condition_reg_ = CC_OF;
+  } else {
+    SetS390ConditionCode<float>(fr1_val, fr2_val);
+  }
+
+  return length;
+}
+
+EVALUATE(AEBR) {
+  DCHECK_OPCODE(AEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fr1_val = get_float32_from_d_register(r1);
+  float fr2_val = get_float32_from_d_register(r2);
+  fr1_val += fr2_val;
+  set_d_register_from_float32(r1, fr1_val);
+  SetS390ConditionCode<float>(fr1_val, 0);
+
+  return length;
+}
+
+EVALUATE(SEBR) {
+  DCHECK_OPCODE(SEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fr1_val = get_float32_from_d_register(r1);
+  float fr2_val = get_float32_from_d_register(r2);
+  fr1_val -= fr2_val;
+  set_d_register_from_float32(r1, fr1_val);
+  SetS390ConditionCode<float>(fr1_val, 0);
+
+  return length;
+}
+
+EVALUATE(MDEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DEBR) {
+  DCHECK_OPCODE(DEBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  float fr1_val = get_float32_from_d_register(r1);
+  float fr2_val = get_float32_from_d_register(r2);
+  fr1_val /= fr2_val;
+  set_d_register_from_float32(r1, fr1_val);
+  SetS390ConditionCode<float>(fr1_val, 0);
+
+  return length;
+}
+
+EVALUATE(MAEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPDBR) {
+  DCHECK_OPCODE(LPDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r2_val = get_double_from_d_register(r2);
+  double r1_val = std::fabs(r2_val);
+  set_d_register_from_double(r1, r1_val);
+  if (r2_val != r2_val) {  // input is NaN
+    condition_reg_ = CC_OF;
+  } else if (r2_val == 0) {
+    condition_reg_ = CC_EQ;
+  } else {
+    condition_reg_ = CC_GT;
+  }
+  return length;
+}
+
+EVALUATE(LNDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTDBR) {
+  DCHECK_OPCODE(LTDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_d_register(r2);
+  SetS390ConditionCode<double>(bit_cast<double, int64_t>(r2_val), 0.0);
+  set_d_register(r1, r2_val);
+  return length;
+}
+
+EVALUATE(LCDBR) {
+  DCHECK_OPCODE(LCDBR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  double r2_val = get_double_from_d_register(r2);
+  double r1_val = -r2_val;
+  set_d_register_from_double(r1, r1_val);
+  if (r2_val != r2_val) {  // input is NaN
+    condition_reg_ = CC_OF;
+  } else if (r2_val == 0) {
+    condition_reg_ = CC_EQ;
+  } else if (r2_val < 0) {
+    condition_reg_ = CC_LT;
+  } else if (r2_val > 0) {
+    condition_reg_ = CC_GT;
+  }
+  return length;
+}
+
+EVALUATE(SQEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SQDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SQXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MEEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ADBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MADBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LNXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LCXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LEDBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LDXBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LEXBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(FIXBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TBEDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TBDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DIEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(FIEBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(THDER) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(THDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DIDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(FIDBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LXR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPDFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LNDFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LCDFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LZER) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LZDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LZXR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SFPC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SFASR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EFPC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CELFBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDLFBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXLFBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CEFBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDFBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXFBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFEBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFDBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFXBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLFEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLFDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLFXBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CELGBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDLGBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXLGBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CEGBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDGBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXGBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGEBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGDBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGXBRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLGEBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLGDBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFER) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFXR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LDGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGER) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGXR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LGDR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MDTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DDTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ADTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SDTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LDETR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LEDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(FIDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MXTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DXTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AXTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SXTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LXDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LDXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(FIXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGDTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CUDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EEDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ESDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGXTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CUXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CSXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EEXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ESXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDGTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDUTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDSTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CEDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(QADTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(IEDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RRDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXGTRA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXUTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXSTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CEXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(QAXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(IEXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RRXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LNGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LCGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DSGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LRVGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LNGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LCGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DSGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KMAC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LRVR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KMF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KMO) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PCC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KMCTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KM) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KMC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGFR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KIMD) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KLMD) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLGDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLFDTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BCTGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CFXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLFXTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDFTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDLGTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDLFTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXFTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXLGTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXLFTR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGRT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(FLOGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGCR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGHR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MLGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DLGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALCGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLBGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(EPSW) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRTT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRTO) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TROT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TROO) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLCR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLHR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MLR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DLR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALCR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLBR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CU14) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CU24) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CU41) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CU42) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRTRE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRSTU) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TRTE) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AHHHR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SHHHR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALHHHR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLHHHR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CHHR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AHHLR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SHHLR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALHHLR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLHHLR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CHLR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(POPCNT_Z) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LOCGR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NGRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OGRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XGRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AGRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SGRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALGRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLGRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LOCR) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ORK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ARK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLRK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CVBY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DSG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CVBG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LRVG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LGH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DSGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LRV) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LRVH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NTSTG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CVDY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CVDG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STRVG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LTGF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(PFD) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STRV) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STRVH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BCTG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MFY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STHY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ICY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAEY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LGB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LHY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CHY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AHY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SHY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MHY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LGAT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALCG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLBG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STPQ) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LPQ) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ML) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGTAT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLGFAT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LBH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LLHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STHH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LFHAT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LFH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STFH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CHF) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVCDK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVHHI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVGHI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVHI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CHHSI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGHSI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CHSI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLFHSI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TBEGIN) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TBEGINC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LMG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRAG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLAG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CSY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RLLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STMG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STMH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCMH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STCMY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDSY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDSG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BXHG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BXLEG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ECAG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TMY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVIY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(NIY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLIY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(OIY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(XIY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ASI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALSI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AGSI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALGSI) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ICMH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ICMY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MVCLU) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLCLU) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STMY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LMH) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LMY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TP) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRAK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLAK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRLK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLLK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LOCG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STOCG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LANG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAOG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAXG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAAG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAALG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LOC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STOC) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAN) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAO) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAX) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAA) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LAAL) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BRXHG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(BRXLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RISBLG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RNSBG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ROSBG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RXSBG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RISBGN) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(RISBHG) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGRJ) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGIT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CIT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CLFIT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGIJ) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CIJ) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALHSIK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ALGHSIK) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGRB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CGIB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CIB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LDEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LXDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LXEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MXDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(AEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MDEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MAEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TCEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TCDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TCXB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SQEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SQDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MEEB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(KDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(ADB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(DDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MADB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(MSDB) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLDT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRDT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SLXT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(SRXT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TDCET) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TDGET) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TDCDT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TDGDT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TDCXT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(TDGXT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LEY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(LDY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STEY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(STDY) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CZDT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CZXT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CDZT) { return DecodeInstructionOriginal(instr); }
+
+EVALUATE(CXZT) { return DecodeInstructionOriginal(instr); }
+
+#undef EVALUATE
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/s390/simulator-s390.h b/src/s390/simulator-s390.h
index ae3dd58..6e82c9a 100644
--- a/src/s390/simulator-s390.h
+++ b/src/s390/simulator-s390.h
@@ -507,6 +507,742 @@
   };
   StopCountAndDesc watched_stops_[kNumOfWatchedStops];
   void DebugStart();
+
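+  // Instruction dispatch for the simulator: EvalTable maps each possible
+  // 16-bit opcode to a member-function pointer. EvalTableInit() populates
+  // the table once; opcodes without a dedicated handler are expected to
+  // fall through to Evaluate_Unknown, and the Evaluate_* stubs currently
+  // delegate to DecodeInstructionOriginal().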
+  int DecodeInstructionOriginal(Instruction* instr);
+  int DecodeInstruction(Instruction* instr);
+  int Evaluate_Unknown(Instruction* instr);
+#define MAX_NUM_OPCODES (1 << 16)
+  typedef int (Simulator::*EvaluateFuncType)(Instruction*);
+
+  static EvaluateFuncType EvalTable[MAX_NUM_OPCODES];
+  static void EvalTableInit();
+
+#define EVALUATE(name) int Evaluate_##name(Instruction* instr)
+  EVALUATE(BKPT);
+  EVALUATE(SPM);
+  EVALUATE(BALR);
+  EVALUATE(BCTR);
+  EVALUATE(BCR);
+  EVALUATE(SVC);
+  EVALUATE(BSM);
+  EVALUATE(BASSM);
+  EVALUATE(BASR);
+  EVALUATE(MVCL);
+  EVALUATE(CLCL);
+  EVALUATE(LPR);
+  EVALUATE(LNR);
+  EVALUATE(LTR);
+  EVALUATE(LCR);
+  EVALUATE(NR);
+  EVALUATE(CLR);
+  EVALUATE(OR);
+  EVALUATE(XR);
+  EVALUATE(LR);
+  EVALUATE(CR);
+  EVALUATE(AR);
+  EVALUATE(SR);
+  EVALUATE(MR);
+  EVALUATE(DR);
+  EVALUATE(ALR);
+  EVALUATE(SLR);
+  EVALUATE(LDR);
+  EVALUATE(CDR);
+  EVALUATE(LER);
+  EVALUATE(STH);
+  EVALUATE(LA);
+  EVALUATE(STC);
+  EVALUATE(IC_z);
+  EVALUATE(EX);
+  EVALUATE(BAL);
+  EVALUATE(BCT);
+  EVALUATE(BC);
+  EVALUATE(LH);
+  EVALUATE(CH);
+  EVALUATE(AH);
+  EVALUATE(SH);
+  EVALUATE(MH);
+  EVALUATE(BAS);
+  EVALUATE(CVD);
+  EVALUATE(CVB);
+  EVALUATE(ST);
+  EVALUATE(LAE);
+  EVALUATE(N);
+  EVALUATE(CL);
+  EVALUATE(O);
+  EVALUATE(X);
+  EVALUATE(L);
+  EVALUATE(C);
+  EVALUATE(A);
+  EVALUATE(S);
+  EVALUATE(M);
+  EVALUATE(D);
+  EVALUATE(AL);
+  EVALUATE(SL);
+  EVALUATE(STD);
+  EVALUATE(LD);
+  EVALUATE(CD);
+  EVALUATE(STE);
+  EVALUATE(MS);
+  EVALUATE(LE);
+  EVALUATE(BRXH);
+  EVALUATE(BRXLE);
+  EVALUATE(BXH);
+  EVALUATE(BXLE);
+  EVALUATE(SRL);
+  EVALUATE(SLL);
+  EVALUATE(SRA);
+  EVALUATE(SLA);
+  EVALUATE(SRDL);
+  EVALUATE(SLDL);
+  EVALUATE(SRDA);
+  EVALUATE(SLDA);
+  EVALUATE(STM);
+  EVALUATE(TM);
+  EVALUATE(MVI);
+  EVALUATE(TS);
+  EVALUATE(NI);
+  EVALUATE(CLI);
+  EVALUATE(OI);
+  EVALUATE(XI);
+  EVALUATE(LM);
+  EVALUATE(MVCLE);
+  EVALUATE(CLCLE);
+  EVALUATE(MC);
+  EVALUATE(CDS);
+  EVALUATE(STCM);
+  EVALUATE(ICM);
+  EVALUATE(BPRP);
+  EVALUATE(BPP);
+  EVALUATE(TRTR);
+  EVALUATE(MVN);
+  EVALUATE(MVC);
+  EVALUATE(MVZ);
+  EVALUATE(NC);
+  EVALUATE(CLC);
+  EVALUATE(OC);
+  EVALUATE(XC);
+  EVALUATE(MVCP);
+  EVALUATE(TR);
+  EVALUATE(TRT);
+  EVALUATE(ED);
+  EVALUATE(EDMK);
+  EVALUATE(PKU);
+  EVALUATE(UNPKU);
+  EVALUATE(MVCIN);
+  EVALUATE(PKA);
+  EVALUATE(UNPKA);
+  EVALUATE(PLO);
+  EVALUATE(LMD);
+  EVALUATE(SRP);
+  EVALUATE(MVO);
+  EVALUATE(PACK);
+  EVALUATE(UNPK);
+  EVALUATE(ZAP);
+  EVALUATE(AP);
+  EVALUATE(SP);
+  EVALUATE(MP);
+  EVALUATE(DP);
+  EVALUATE(UPT);
+  EVALUATE(PFPO);
+  EVALUATE(IIHH);
+  EVALUATE(IIHL);
+  EVALUATE(IILH);
+  EVALUATE(IILL);
+  EVALUATE(NIHH);
+  EVALUATE(NIHL);
+  EVALUATE(NILH);
+  EVALUATE(NILL);
+  EVALUATE(OIHH);
+  EVALUATE(OIHL);
+  EVALUATE(OILH);
+  EVALUATE(OILL);
+  EVALUATE(LLIHH);
+  EVALUATE(LLIHL);
+  EVALUATE(LLILH);
+  EVALUATE(LLILL);
+  EVALUATE(TMLH);
+  EVALUATE(TMLL);
+  EVALUATE(TMHH);
+  EVALUATE(TMHL);
+  EVALUATE(BRC);
+  EVALUATE(BRAS);
+  EVALUATE(BRCT);
+  EVALUATE(BRCTG);
+  EVALUATE(LHI);
+  EVALUATE(LGHI);
+  EVALUATE(AHI);
+  EVALUATE(AGHI);
+  EVALUATE(MHI);
+  EVALUATE(MGHI);
+  EVALUATE(CHI);
+  EVALUATE(CGHI);
+  EVALUATE(LARL);
+  EVALUATE(LGFI);
+  EVALUATE(BRCL);
+  EVALUATE(BRASL);
+  EVALUATE(XIHF);
+  EVALUATE(XILF);
+  EVALUATE(IIHF);
+  EVALUATE(IILF);
+  EVALUATE(NIHF);
+  EVALUATE(NILF);
+  EVALUATE(OIHF);
+  EVALUATE(OILF);
+  EVALUATE(LLIHF);
+  EVALUATE(LLILF);
+  EVALUATE(MSGFI);
+  EVALUATE(MSFI);
+  EVALUATE(SLGFI);
+  EVALUATE(SLFI);
+  EVALUATE(AGFI);
+  EVALUATE(AFI);
+  EVALUATE(ALGFI);
+  EVALUATE(ALFI);
+  EVALUATE(CGFI);
+  EVALUATE(CFI);
+  EVALUATE(CLGFI);
+  EVALUATE(CLFI);
+  EVALUATE(LLHRL);
+  EVALUATE(LGHRL);
+  EVALUATE(LHRL);
+  EVALUATE(LLGHRL);
+  EVALUATE(STHRL);
+  EVALUATE(LGRL);
+  EVALUATE(STGRL);
+  EVALUATE(LGFRL);
+  EVALUATE(LRL);
+  EVALUATE(LLGFRL);
+  EVALUATE(STRL);
+  EVALUATE(EXRL);
+  EVALUATE(PFDRL);
+  EVALUATE(CGHRL);
+  EVALUATE(CHRL);
+  EVALUATE(CGRL);
+  EVALUATE(CGFRL);
+  EVALUATE(ECTG);
+  EVALUATE(CSST);
+  EVALUATE(LPD);
+  EVALUATE(LPDG);
+  EVALUATE(BRCTH);
+  EVALUATE(AIH);
+  EVALUATE(ALSIH);
+  EVALUATE(ALSIHN);
+  EVALUATE(CIH);
+  EVALUATE(STCK);
+  EVALUATE(CFC);
+  EVALUATE(IPM);
+  EVALUATE(HSCH);
+  EVALUATE(MSCH);
+  EVALUATE(SSCH);
+  EVALUATE(STSCH);
+  EVALUATE(TSCH);
+  EVALUATE(TPI);
+  EVALUATE(SAL);
+  EVALUATE(RSCH);
+  EVALUATE(STCRW);
+  EVALUATE(STCPS);
+  EVALUATE(RCHP);
+  EVALUATE(SCHM);
+  EVALUATE(CKSM);
+  EVALUATE(SAR);
+  EVALUATE(EAR);
+  EVALUATE(MSR);
+  EVALUATE(MVST);
+  EVALUATE(CUSE);
+  EVALUATE(SRST);
+  EVALUATE(XSCH);
+  EVALUATE(STCKE);
+  EVALUATE(STCKF);
+  EVALUATE(SRNM);
+  EVALUATE(STFPC);
+  EVALUATE(LFPC);
+  EVALUATE(TRE);
+  EVALUATE(CUUTF);
+  EVALUATE(CUTFU);
+  EVALUATE(STFLE);
+  EVALUATE(SRNMB);
+  EVALUATE(SRNMT);
+  EVALUATE(LFAS);
+  EVALUATE(PPA);
+  EVALUATE(ETND);
+  EVALUATE(TEND);
+  EVALUATE(NIAI);
+  EVALUATE(TABORT);
+  EVALUATE(TRAP4);
+  EVALUATE(LPEBR);
+  EVALUATE(LNEBR);
+  EVALUATE(LTEBR);
+  EVALUATE(LCEBR);
+  EVALUATE(LDEBR);
+  EVALUATE(LXDBR);
+  EVALUATE(LXEBR);
+  EVALUATE(MXDBR);
+  EVALUATE(KEBR);
+  EVALUATE(CEBR);
+  EVALUATE(AEBR);
+  EVALUATE(SEBR);
+  EVALUATE(MDEBR);
+  EVALUATE(DEBR);
+  EVALUATE(MAEBR);
+  EVALUATE(MSEBR);
+  EVALUATE(LPDBR);
+  EVALUATE(LNDBR);
+  EVALUATE(LTDBR);
+  EVALUATE(LCDBR);
+  EVALUATE(SQEBR);
+  EVALUATE(SQDBR);
+  EVALUATE(SQXBR);
+  EVALUATE(MEEBR);
+  EVALUATE(KDBR);
+  EVALUATE(CDBR);
+  EVALUATE(ADBR);
+  EVALUATE(SDBR);
+  EVALUATE(MDBR);
+  EVALUATE(DDBR);
+  EVALUATE(MADBR);
+  EVALUATE(MSDBR);
+  EVALUATE(LPXBR);
+  EVALUATE(LNXBR);
+  EVALUATE(LTXBR);
+  EVALUATE(LCXBR);
+  EVALUATE(LEDBRA);
+  EVALUATE(LDXBRA);
+  EVALUATE(LEXBRA);
+  EVALUATE(FIXBRA);
+  EVALUATE(KXBR);
+  EVALUATE(CXBR);
+  EVALUATE(AXBR);
+  EVALUATE(SXBR);
+  EVALUATE(MXBR);
+  EVALUATE(DXBR);
+  EVALUATE(TBEDR);
+  EVALUATE(TBDR);
+  EVALUATE(DIEBR);
+  EVALUATE(FIEBRA);
+  EVALUATE(THDER);
+  EVALUATE(THDR);
+  EVALUATE(DIDBR);
+  EVALUATE(FIDBRA);
+  EVALUATE(LXR);
+  EVALUATE(LPDFR);
+  EVALUATE(LNDFR);
+  EVALUATE(LCDFR);
+  EVALUATE(LZER);
+  EVALUATE(LZDR);
+  EVALUATE(LZXR);
+  EVALUATE(SFPC);
+  EVALUATE(SFASR);
+  EVALUATE(EFPC);
+  EVALUATE(CELFBR);
+  EVALUATE(CDLFBR);
+  EVALUATE(CXLFBR);
+  EVALUATE(CEFBRA);
+  EVALUATE(CDFBRA);
+  EVALUATE(CXFBRA);
+  EVALUATE(CFEBRA);
+  EVALUATE(CFDBRA);
+  EVALUATE(CFXBRA);
+  EVALUATE(CLFEBR);
+  EVALUATE(CLFDBR);
+  EVALUATE(CLFXBR);
+  EVALUATE(CELGBR);
+  EVALUATE(CDLGBR);
+  EVALUATE(CXLGBR);
+  EVALUATE(CEGBRA);
+  EVALUATE(CDGBRA);
+  EVALUATE(CXGBRA);
+  EVALUATE(CGEBRA);
+  EVALUATE(CGDBRA);
+  EVALUATE(CGXBRA);
+  EVALUATE(CLGEBR);
+  EVALUATE(CLGDBR);
+  EVALUATE(CFER);
+  EVALUATE(CFDR);
+  EVALUATE(CFXR);
+  EVALUATE(LDGR);
+  EVALUATE(CGER);
+  EVALUATE(CGDR);
+  EVALUATE(CGXR);
+  EVALUATE(LGDR);
+  EVALUATE(MDTR);
+  EVALUATE(MDTRA);
+  EVALUATE(DDTRA);
+  EVALUATE(ADTRA);
+  EVALUATE(SDTRA);
+  EVALUATE(LDETR);
+  EVALUATE(LEDTR);
+  EVALUATE(LTDTR);
+  EVALUATE(FIDTR);
+  EVALUATE(MXTRA);
+  EVALUATE(DXTRA);
+  EVALUATE(AXTRA);
+  EVALUATE(SXTRA);
+  EVALUATE(LXDTR);
+  EVALUATE(LDXTR);
+  EVALUATE(LTXTR);
+  EVALUATE(FIXTR);
+  EVALUATE(KDTR);
+  EVALUATE(CGDTRA);
+  EVALUATE(CUDTR);
+  EVALUATE(CDTR);
+  EVALUATE(EEDTR);
+  EVALUATE(ESDTR);
+  EVALUATE(KXTR);
+  EVALUATE(CGXTRA);
+  EVALUATE(CUXTR);
+  EVALUATE(CSXTR);
+  EVALUATE(CXTR);
+  EVALUATE(EEXTR);
+  EVALUATE(ESXTR);
+  EVALUATE(CDGTRA);
+  EVALUATE(CDUTR);
+  EVALUATE(CDSTR);
+  EVALUATE(CEDTR);
+  EVALUATE(QADTR);
+  EVALUATE(IEDTR);
+  EVALUATE(RRDTR);
+  EVALUATE(CXGTRA);
+  EVALUATE(CXUTR);
+  EVALUATE(CXSTR);
+  EVALUATE(CEXTR);
+  EVALUATE(QAXTR);
+  EVALUATE(IEXTR);
+  EVALUATE(RRXTR);
+  EVALUATE(LPGR);
+  EVALUATE(LNGR);
+  EVALUATE(LTGR);
+  EVALUATE(LCGR);
+  EVALUATE(LGR);
+  EVALUATE(LGBR);
+  EVALUATE(LGHR);
+  EVALUATE(AGR);
+  EVALUATE(SGR);
+  EVALUATE(ALGR);
+  EVALUATE(SLGR);
+  EVALUATE(MSGR);
+  EVALUATE(DSGR);
+  EVALUATE(LRVGR);
+  EVALUATE(LPGFR);
+  EVALUATE(LNGFR);
+  EVALUATE(LTGFR);
+  EVALUATE(LCGFR);
+  EVALUATE(LGFR);
+  EVALUATE(LLGFR);
+  EVALUATE(LLGTR);
+  EVALUATE(AGFR);
+  EVALUATE(SGFR);
+  EVALUATE(ALGFR);
+  EVALUATE(SLGFR);
+  EVALUATE(MSGFR);
+  EVALUATE(DSGFR);
+  EVALUATE(KMAC);
+  EVALUATE(LRVR);
+  EVALUATE(CGR);
+  EVALUATE(CLGR);
+  EVALUATE(LBR);
+  EVALUATE(LHR);
+  EVALUATE(KMF);
+  EVALUATE(KMO);
+  EVALUATE(PCC);
+  EVALUATE(KMCTR);
+  EVALUATE(KM);
+  EVALUATE(KMC);
+  EVALUATE(CGFR);
+  EVALUATE(KIMD);
+  EVALUATE(KLMD);
+  EVALUATE(CFDTR);
+  EVALUATE(CLGDTR);
+  EVALUATE(CLFDTR);
+  EVALUATE(BCTGR);
+  EVALUATE(CFXTR);
+  EVALUATE(CLFXTR);
+  EVALUATE(CDFTR);
+  EVALUATE(CDLGTR);
+  EVALUATE(CDLFTR);
+  EVALUATE(CXFTR);
+  EVALUATE(CXLGTR);
+  EVALUATE(CXLFTR);
+  EVALUATE(CGRT);
+  EVALUATE(NGR);
+  EVALUATE(OGR);
+  EVALUATE(XGR);
+  EVALUATE(FLOGR);
+  EVALUATE(LLGCR);
+  EVALUATE(LLGHR);
+  EVALUATE(MLGR);
+  EVALUATE(DLGR);
+  EVALUATE(ALCGR);
+  EVALUATE(SLBGR);
+  EVALUATE(EPSW);
+  EVALUATE(TRTT);
+  EVALUATE(TRTO);
+  EVALUATE(TROT);
+  EVALUATE(TROO);
+  EVALUATE(LLCR);
+  EVALUATE(LLHR);
+  EVALUATE(MLR);
+  EVALUATE(DLR);
+  EVALUATE(ALCR);
+  EVALUATE(SLBR);
+  EVALUATE(CU14);
+  EVALUATE(CU24);
+  EVALUATE(CU41);
+  EVALUATE(CU42);
+  EVALUATE(TRTRE);
+  EVALUATE(SRSTU);
+  EVALUATE(TRTE);
+  EVALUATE(AHHHR);
+  EVALUATE(SHHHR);
+  EVALUATE(ALHHHR);
+  EVALUATE(SLHHHR);
+  EVALUATE(CHHR);
+  EVALUATE(AHHLR);
+  EVALUATE(SHHLR);
+  EVALUATE(ALHHLR);
+  EVALUATE(SLHHLR);
+  EVALUATE(CHLR);
+  EVALUATE(POPCNT_Z);
+  EVALUATE(LOCGR);
+  EVALUATE(NGRK);
+  EVALUATE(OGRK);
+  EVALUATE(XGRK);
+  EVALUATE(AGRK);
+  EVALUATE(SGRK);
+  EVALUATE(ALGRK);
+  EVALUATE(SLGRK);
+  EVALUATE(LOCR);
+  EVALUATE(NRK);
+  EVALUATE(ORK);
+  EVALUATE(XRK);
+  EVALUATE(ARK);
+  EVALUATE(SRK);
+  EVALUATE(ALRK);
+  EVALUATE(SLRK);
+  EVALUATE(LTG);
+  EVALUATE(LG);
+  EVALUATE(CVBY);
+  EVALUATE(AG);
+  EVALUATE(SG);
+  EVALUATE(ALG);
+  EVALUATE(SLG);
+  EVALUATE(MSG);
+  EVALUATE(DSG);
+  EVALUATE(CVBG);
+  EVALUATE(LRVG);
+  EVALUATE(LT);
+  EVALUATE(LGF);
+  EVALUATE(LGH);
+  EVALUATE(LLGF);
+  EVALUATE(LLGT);
+  EVALUATE(AGF);
+  EVALUATE(SGF);
+  EVALUATE(ALGF);
+  EVALUATE(SLGF);
+  EVALUATE(MSGF);
+  EVALUATE(DSGF);
+  EVALUATE(LRV);
+  EVALUATE(LRVH);
+  EVALUATE(CG);
+  EVALUATE(CLG);
+  EVALUATE(STG);
+  EVALUATE(NTSTG);
+  EVALUATE(CVDY);
+  EVALUATE(CVDG);
+  EVALUATE(STRVG);
+  EVALUATE(CGF);
+  EVALUATE(CLGF);
+  EVALUATE(LTGF);
+  EVALUATE(CGH);
+  EVALUATE(PFD);
+  EVALUATE(STRV);
+  EVALUATE(STRVH);
+  EVALUATE(BCTG);
+  EVALUATE(STY);
+  EVALUATE(MSY);
+  EVALUATE(NY);
+  EVALUATE(CLY);
+  EVALUATE(OY);
+  EVALUATE(XY);
+  EVALUATE(LY);
+  EVALUATE(CY);
+  EVALUATE(AY);
+  EVALUATE(SY);
+  EVALUATE(MFY);
+  EVALUATE(ALY);
+  EVALUATE(SLY);
+  EVALUATE(STHY);
+  EVALUATE(LAY);
+  EVALUATE(STCY);
+  EVALUATE(ICY);
+  EVALUATE(LAEY);
+  EVALUATE(LB);
+  EVALUATE(LGB);
+  EVALUATE(LHY);
+  EVALUATE(CHY);
+  EVALUATE(AHY);
+  EVALUATE(SHY);
+  EVALUATE(MHY);
+  EVALUATE(NG);
+  EVALUATE(OG);
+  EVALUATE(XG);
+  EVALUATE(LGAT);
+  EVALUATE(MLG);
+  EVALUATE(DLG);
+  EVALUATE(ALCG);
+  EVALUATE(SLBG);
+  EVALUATE(STPQ);
+  EVALUATE(LPQ);
+  EVALUATE(LLGC);
+  EVALUATE(LLGH);
+  EVALUATE(LLC);
+  EVALUATE(LLH);
+  EVALUATE(ML);
+  EVALUATE(DL);
+  EVALUATE(ALC);
+  EVALUATE(SLB);
+  EVALUATE(LLGTAT);
+  EVALUATE(LLGFAT);
+  EVALUATE(LAT);
+  EVALUATE(LBH);
+  EVALUATE(LLCH);
+  EVALUATE(STCH);
+  EVALUATE(LHH);
+  EVALUATE(LLHH);
+  EVALUATE(STHH);
+  EVALUATE(LFHAT);
+  EVALUATE(LFH);
+  EVALUATE(STFH);
+  EVALUATE(CHF);
+  EVALUATE(MVCDK);
+  EVALUATE(MVHHI);
+  EVALUATE(MVGHI);
+  EVALUATE(MVHI);
+  EVALUATE(CHHSI);
+  EVALUATE(CGHSI);
+  EVALUATE(CHSI);
+  EVALUATE(CLFHSI);
+  EVALUATE(TBEGIN);
+  EVALUATE(TBEGINC);
+  EVALUATE(LMG);
+  EVALUATE(SRAG);
+  EVALUATE(SLAG);
+  EVALUATE(SRLG);
+  EVALUATE(SLLG);
+  EVALUATE(CSY);
+  EVALUATE(RLLG);
+  EVALUATE(RLL);
+  EVALUATE(STMG);
+  EVALUATE(STMH);
+  EVALUATE(STCMH);
+  EVALUATE(STCMY);
+  EVALUATE(CDSY);
+  EVALUATE(CDSG);
+  EVALUATE(BXHG);
+  EVALUATE(BXLEG);
+  EVALUATE(ECAG);
+  EVALUATE(TMY);
+  EVALUATE(MVIY);
+  EVALUATE(NIY);
+  EVALUATE(CLIY);
+  EVALUATE(OIY);
+  EVALUATE(XIY);
+  EVALUATE(ASI);
+  EVALUATE(ALSI);
+  EVALUATE(AGSI);
+  EVALUATE(ALGSI);
+  EVALUATE(ICMH);
+  EVALUATE(ICMY);
+  EVALUATE(MVCLU);
+  EVALUATE(CLCLU);
+  EVALUATE(STMY);
+  EVALUATE(LMH);
+  EVALUATE(LMY);
+  EVALUATE(TP);
+  EVALUATE(SRAK);
+  EVALUATE(SLAK);
+  EVALUATE(SRLK);
+  EVALUATE(SLLK);
+  EVALUATE(LOCG);
+  EVALUATE(STOCG);
+  EVALUATE(LANG);
+  EVALUATE(LAOG);
+  EVALUATE(LAXG);
+  EVALUATE(LAAG);
+  EVALUATE(LAALG);
+  EVALUATE(LOC);
+  EVALUATE(STOC);
+  EVALUATE(LAN);
+  EVALUATE(LAO);
+  EVALUATE(LAX);
+  EVALUATE(LAA);
+  EVALUATE(LAAL);
+  EVALUATE(BRXHG);
+  EVALUATE(BRXLG);
+  EVALUATE(RISBLG);
+  EVALUATE(RNSBG);
+  EVALUATE(RISBG);
+  EVALUATE(ROSBG);
+  EVALUATE(RXSBG);
+  EVALUATE(RISBGN);
+  EVALUATE(RISBHG);
+  EVALUATE(CGRJ);
+  EVALUATE(CGIT);
+  EVALUATE(CIT);
+  EVALUATE(CLFIT);
+  EVALUATE(CGIJ);
+  EVALUATE(CIJ);
+  EVALUATE(AHIK);
+  EVALUATE(AGHIK);
+  EVALUATE(ALHSIK);
+  EVALUATE(ALGHSIK);
+  EVALUATE(CGRB);
+  EVALUATE(CGIB);
+  EVALUATE(CIB);
+  EVALUATE(LDEB);
+  EVALUATE(LXDB);
+  EVALUATE(LXEB);
+  EVALUATE(MXDB);
+  EVALUATE(KEB);
+  EVALUATE(CEB);
+  EVALUATE(AEB);
+  EVALUATE(SEB);
+  EVALUATE(MDEB);
+  EVALUATE(DEB);
+  EVALUATE(MAEB);
+  EVALUATE(MSEB);
+  EVALUATE(TCEB);
+  EVALUATE(TCDB);
+  EVALUATE(TCXB);
+  EVALUATE(SQEB);
+  EVALUATE(SQDB);
+  EVALUATE(MEEB);
+  EVALUATE(KDB);
+  EVALUATE(CDB);
+  EVALUATE(ADB);
+  EVALUATE(SDB);
+  EVALUATE(MDB);
+  EVALUATE(DDB);
+  EVALUATE(MADB);
+  EVALUATE(MSDB);
+  EVALUATE(SLDT);
+  EVALUATE(SRDT);
+  EVALUATE(SLXT);
+  EVALUATE(SRXT);
+  EVALUATE(TDCET);
+  EVALUATE(TDGET);
+  EVALUATE(TDCDT);
+  EVALUATE(TDGDT);
+  EVALUATE(TDCXT);
+  EVALUATE(TDGXT);
+  EVALUATE(LEY);
+  EVALUATE(LDY);
+  EVALUATE(STEY);
+  EVALUATE(STDY);
+  EVALUATE(CZDT);
+  EVALUATE(CZXT);
+  EVALUATE(CDZT);
+  EVALUATE(CXZT);
+#undef EVALUATE
 };
 
 // When running with the simulator transition into simulated execution at this
diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc
index 84a08c1..1a2e077 100644
--- a/src/snapshot/code-serializer.cc
+++ b/src/snapshot/code-serializer.cc
@@ -73,12 +73,10 @@
                          where_to_point);
         return;
       case Code::STUB:
-        SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
-        return;
 #define IC_KIND_CASE(KIND) case Code::KIND:
         IC_KIND_LIST(IC_KIND_CASE)
 #undef IC_KIND_CASE
-        SerializeIC(code_object, how_to_code, where_to_point);
+        SerializeCodeStub(code_object, how_to_code, where_to_point);
         return;
       case Code::FUNCTION:
         DCHECK(code_object->has_reloc_info_for_serialization());
@@ -130,71 +128,23 @@
   sink_->PutInt(builtin_index, "builtin_index");
 }
 
-void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
                                        WhereToPoint where_to_point) {
-  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
-         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
-         (how_to_code == kFromCode && where_to_point == kInnerPointer));
+  // We only arrive here if we have not encountered this code stub before.
+  DCHECK(!reference_map()->Lookup(code_stub).is_valid());
+  uint32_t stub_key = code_stub->stub_key();
   DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
   DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
-
-  int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
-
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding code stub %s as %d\n",
-           CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)), index);
-  }
-
-  sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
-  sink_->PutInt(index, "CodeStub key");
-}
-
-void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
-                                 WhereToPoint where_to_point) {
-  // The IC may be implemented as a stub.
-  uint32_t stub_key = ic->stub_key();
-  if (stub_key != CodeStub::NoCacheKey()) {
-    if (FLAG_trace_serializer) {
-      PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
-    }
-    SerializeCodeStub(stub_key, how_to_code, where_to_point);
-    return;
-  }
-  // The IC may be implemented as builtin. Only real builtins have an
-  // actual builtin_index value attached (otherwise it's just garbage).
-  // Compare to make sure we are really dealing with a builtin.
-  int builtin_index = ic->builtin_index();
-  if (builtin_index < Builtins::builtin_count) {
-    Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
-    Code* builtin = isolate()->builtins()->builtin(name);
-    if (builtin == ic) {
-      if (FLAG_trace_serializer) {
-        PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
-      }
-      DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
-             ic->kind() == Code::KEYED_STORE_IC);
-      SerializeBuiltin(builtin_index, how_to_code, where_to_point);
-      return;
-    }
-  }
-  // The IC may also just be a piece of code kept in the non_monomorphic_cache.
-  // In that case, just serialize as a normal code object.
-  if (FLAG_trace_serializer) {
-    PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
-  }
-  DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
-  SerializeGeneric(ic, how_to_code, where_to_point);
-}
-
-int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
-  // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
-  int index = 0;
-  while (index < stub_keys_.length()) {
-    if (stub_keys_[index] == stub_key) return index;
-    index++;
-  }
   stub_keys_.Add(stub_key);
-  return index;
+
+  SerializerReference reference =
+      reference_map()->AddAttachedReference(code_stub);
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding code stub %s as attached reference %d\n",
+           CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)),
+           reference.attached_reference_index());
+  }
+  PutAttachedReference(reference, how_to_code, where_to_point);
 }
 
 MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
@@ -212,18 +162,13 @@
     return MaybeHandle<SharedFunctionInfo>();
   }
 
-  // Prepare and register list of attached objects.
-  Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
-  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
-      code_stub_keys.length() + kCodeStubsBaseIndex);
-  attached_objects[kSourceObjectIndex] = source;
-  for (int i = 0; i < code_stub_keys.length(); i++) {
-    attached_objects[i + kCodeStubsBaseIndex] =
-        CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
-  }
-
   Deserializer deserializer(scd.get());
-  deserializer.SetAttachedObjects(attached_objects);
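+  // Attached objects must be registered in the same order in which the
+  // serializer created their references: the source string first, then the
+  // code stubs recreated from their keys.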
+  deserializer.AddAttachedObject(source);
+  Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
+  for (int i = 0; i < code_stub_keys.length(); i++) {
+    deserializer.AddAttachedObject(
+        CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked());
+  }
 
   // Deserialize.
   Handle<SharedFunctionInfo> result;
@@ -247,8 +192,8 @@
       Script* script = Script::cast(result->script());
       if (script->name()->IsString()) name = String::cast(script->name());
     }
-    isolate->logger()->CodeCreateEvent(
-        Logger::SCRIPT_TAG, result->abstract_code(), *result, NULL, name);
+    isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG,
+                                       result->abstract_code(), *result, name);
   }
   return scope.CloseAndEscape(result);
 }
diff --git a/src/snapshot/code-serializer.h b/src/snapshot/code-serializer.h
index b217fff..8ed4cf6 100644
--- a/src/snapshot/code-serializer.h
+++ b/src/snapshot/code-serializer.h
@@ -20,11 +20,6 @@
   MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
       Isolate* isolate, ScriptData* cached_data, Handle<String> source);
 
-  static const int kSourceObjectIndex = 0;
-  STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
-
-  static const int kCodeStubsBaseIndex = 1;
-
   String* source() const {
     DCHECK(!AllowHeapAllocation::IsAllowed());
     return source_;
@@ -35,7 +30,7 @@
  private:
   CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
       : Serializer(isolate, sink), source_(source) {
-    back_reference_map_.AddSourceString(source);
+    reference_map_.AddAttachedReference(source);
   }
 
   ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
@@ -45,13 +40,10 @@
 
   void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
                         WhereToPoint where_to_point);
-  void SerializeIC(Code* ic, HowToCode how_to_code,
-                   WhereToPoint where_to_point);
-  void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+  void SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
                          WhereToPoint where_to_point);
   void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
                         WhereToPoint where_to_point);
-  int AddCodeStubKey(uint32_t stub_key);
 
   DisallowHeapAllocation no_gc_;
   String* source_;
diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc
index 0a21fef..88820ae 100644
--- a/src/snapshot/deserializer.cc
+++ b/src/snapshot/deserializer.cc
@@ -119,9 +119,7 @@
     return MaybeHandle<Object>();
   }
 
-  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
-  attached_objects[kGlobalProxyReference] = global_proxy;
-  SetAttachedObjects(attached_objects);
+  AddAttachedObject(global_proxy);
 
   DisallowHeapAllocation no_gc;
   // Keep track of the code space start and end pointers in case new
@@ -167,7 +165,6 @@
 Deserializer::~Deserializer() {
   // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
   // DCHECK(source_.AtEOF());
-  attached_objects_.Dispose();
 }
 
 // This is called on the roots.  It is the driver of the deserialization
@@ -315,7 +312,8 @@
 
 HeapObject* Deserializer::GetBackReferencedObject(int space) {
   HeapObject* obj;
-  BackReference back_reference(source_.GetInt());
+  SerializerReference back_reference =
+      SerializerReference::FromBitfield(source_.GetInt());
   if (space == LO_SPACE) {
     CHECK(back_reference.chunk_index() == 0);
     uint32_t index = back_reference.large_object_index();
@@ -496,7 +494,6 @@
         new_object = reinterpret_cast<Object*>(address);                       \
       } else if (where == kAttachedReference) {                                \
         int index = source_.GetInt();                                          \
-        DCHECK(deserializing_user_code() || index == kGlobalProxyReference);   \
         new_object = *attached_objects_[index];                                \
         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
       } else {                                                                 \
diff --git a/src/snapshot/deserializer.h b/src/snapshot/deserializer.h
index 58c481c..08b3444 100644
--- a/src/snapshot/deserializer.h
+++ b/src/snapshot/deserializer.h
@@ -53,10 +53,10 @@
   // Deserialize a shared function info. Fail gracefully.
   MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
 
-  // Pass a vector of externally-provided objects referenced by the snapshot.
-  // The ownership to its backing store is handed over as well.
-  void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
-    attached_objects_ = attached_objects;
+  // Add an object to back an attached reference. Objects must be added in
+  // the same order in which they are added in the serializer.
+  void AddAttachedObject(Handle<HeapObject> attached_object) {
+    attached_objects_.Add(attached_object);
   }
 
  private:
@@ -117,7 +117,7 @@
   Isolate* isolate_;
 
   // Objects from the attached object descriptions in the serialized user code.
-  Vector<Handle<Object> > attached_objects_;
+  List<Handle<HeapObject> > attached_objects_;
 
   SnapshotByteSource source_;
   uint32_t magic_number_;
diff --git a/src/snapshot/partial-serializer.cc b/src/snapshot/partial-serializer.cc
index 0f1f133..34defb4 100644
--- a/src/snapshot/partial-serializer.cc
+++ b/src/snapshot/partial-serializer.cc
@@ -14,7 +14,6 @@
                                      SnapshotByteSink* sink)
     : Serializer(isolate, sink),
       startup_serializer_(startup_snapshot_serializer),
-      global_object_(NULL),
       next_partial_cache_index_(0) {
   InitializeCodeAddressMap();
 }
@@ -26,8 +25,7 @@
 void PartialSerializer::Serialize(Object** o) {
   if ((*o)->IsContext()) {
     Context* context = Context::cast(*o);
-    global_object_ = context->global_object();
-    back_reference_map()->AddGlobalProxy(context->global_proxy());
+    reference_map()->AddAttachedReference(context->global_proxy());
     // The bootstrap snapshot has a code-stub context. When serializing the
     // partial snapshot, it is chained into the weak context list on the isolate
     // and its next context pointer may point to the code-stub context.  Clear
@@ -74,7 +72,7 @@
   // Pointers from the partial snapshot to the objects in the startup snapshot
   // should go through the root array or through the partial snapshot cache.
   // If this is not the case you may have to add something to the root array.
-  DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
+  DCHECK(!startup_serializer_->reference_map()->Lookup(obj).is_valid());
   // All the internalized strings that the partial snapshot needs should be
   // either in the root table or in the partial snapshot cache.
   DCHECK(!obj->IsInternalizedString());
diff --git a/src/snapshot/partial-serializer.h b/src/snapshot/partial-serializer.h
index 0bf61dd..ddaba5f 100644
--- a/src/snapshot/partial-serializer.h
+++ b/src/snapshot/partial-serializer.h
@@ -50,7 +50,6 @@
   bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
 
   Serializer* startup_serializer_;
-  Object* global_object_;
   PartialCacheIndexMap partial_cache_index_map_;
   int next_partial_cache_index_;
   DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
diff --git a/src/snapshot/serializer-common.cc b/src/snapshot/serializer-common.cc
index eeb7eb7..4afaa20 100644
--- a/src/snapshot/serializer-common.cc
+++ b/src/snapshot/serializer-common.cc
@@ -20,7 +20,10 @@
     Address addr = table->address(i);
     if (addr == ExternalReferenceTable::NotAvailable()) continue;
     // We expect no duplicate external references entries in the table.
-    DCHECK_NULL(map_->Lookup(addr, Hash(addr)));
+    // The AccessorRefTable getter may contain duplicate entries, indicated
+    // by an empty string as the name.
+    DCHECK(table->name(i)[0] == '\0' ||
+           map_->Lookup(addr, Hash(addr)) == nullptr);
     map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
   }
   isolate->set_external_reference_map(map_);
diff --git a/src/snapshot/serializer-common.h b/src/snapshot/serializer-common.h
index 645a9af..1ce5ced 100644
--- a/src/snapshot/serializer-common.h
+++ b/src/snapshot/serializer-common.h
@@ -40,11 +40,13 @@
   }
 
   void Add(HeapObject* object) {
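+    // Raw HeapObject pointers are held here, which is only safe while heap
+    // allocation (and hence GC) is disallowed, since a moving GC would
+    // invalidate them.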
+    DCHECK(!AllowHeapAllocation::IsAllowed());
     circular_queue_[index_] = object;
     index_ = (index_ + 1) & kSizeMask;
   }
 
   HeapObject* Get(int index) {
+    DCHECK(!AllowHeapAllocation::IsAllowed());
     DCHECK_NOT_NULL(circular_queue_[index]);
     return circular_queue_[index];
   }
@@ -52,6 +54,7 @@
   static const int kNotFound = -1;
 
   int Find(HeapObject* object) {
+    DCHECK(!AllowHeapAllocation::IsAllowed());
     for (int i = 0; i < kSize; i++) {
       if (circular_queue_[i] == object) return i;
     }
@@ -211,12 +214,6 @@
   // Sentinel after a new object to indicate that double alignment is needed.
   static const int kDoubleAlignmentSentinel = 0;
 
-  // Used as index for the attached reference representing the source object.
-  static const int kSourceObjectReference = 0;
-
-  // Used as index for the attached reference representing the global proxy.
-  static const int kGlobalProxyReference = 0;
-
   // ---------- member variable ----------
   HotObjectsList hot_objects_;
 };
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
index 4169338..f6f2200 100644
--- a/src/snapshot/serializer.cc
+++ b/src/snapshot/serializer.cc
@@ -71,15 +71,15 @@
   for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
     size_t s = pending_chunk_[space];
     for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
-    PrintF("%16" V8_SIZET_PREFIX V8_PTR_PREFIX "d", s);
+    PrintF("%16" PRIuS, s);
   }
   PrintF("%16d\n", large_objects_total_size_);
 #ifdef OBJECT_PRINT
   PrintF("  Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name)                                         \
-  if (instance_type_count_[Name]) {                                       \
-    PrintF("%10d %10" V8_SIZET_PREFIX V8_PTR_PREFIX "d  %s\n",            \
-           instance_type_count_[Name], instance_type_size_[Name], #Name); \
+#define PRINT_INSTANCE_TYPE(Name)                                 \
+  if (instance_type_count_[Name]) {                               \
+    PrintF("%10d %10" PRIuS "  %s\n", instance_type_count_[Name], \
+           instance_type_size_[Name], #Name);                     \
   }
   INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
 #undef PRINT_INSTANCE_TYPE
@@ -124,10 +124,9 @@
 }
 
 #ifdef DEBUG
-bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
-  DCHECK(reference.is_valid());
-  DCHECK(!reference.is_source());
-  DCHECK(!reference.is_global_proxy());
+bool Serializer::BackReferenceIsAlreadyAllocated(
+    SerializerReference reference) {
+  DCHECK(reference.is_back_reference());
   AllocationSpace space = reference.space();
   int chunk_index = reference.chunk_index();
   if (space == LO_SPACE) {
@@ -163,25 +162,21 @@
       return true;
     }
   }
-  BackReference back_reference = back_reference_map_.Lookup(obj);
-  if (back_reference.is_valid()) {
+  SerializerReference reference = reference_map_.Lookup(obj);
+  if (reference.is_valid()) {
     // Encode the location of an already deserialized object in order to write
     // its location into a later object.  We can encode the location as an
     // offset from the start of the deserialized objects or as an offset
     // backwards from the current allocation pointer.
-    if (back_reference.is_source()) {
+    if (reference.is_attached_reference()) {
       FlushSkip(skip);
-      if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
-      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
-      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
-      sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
-    } else if (back_reference.is_global_proxy()) {
-      FlushSkip(skip);
-      if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
-      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
-      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
-      sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
+      if (FLAG_trace_serializer) {
+        PrintF(" Encoding attached reference %d\n",
+               reference.attached_reference_index());
+      }
+      PutAttachedReference(reference, how_to_code, where_to_point);
     } else {
+      DCHECK(reference.is_back_reference());
       if (FLAG_trace_serializer) {
         PrintF(" Encoding back reference to: ");
         obj->ShortPrint();
@@ -189,7 +184,7 @@
       }
 
       PutAlignmentPrefix(obj);
-      AllocationSpace space = back_reference.space();
+      AllocationSpace space = reference.space();
       if (skip == 0) {
         sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
       } else {
@@ -197,7 +192,7 @@
                    "BackRefWithSkip");
         sink_->PutInt(skip, "BackRefSkipDistance");
       }
-      PutBackReference(obj, back_reference);
+      PutBackReference(obj, reference);
     }
     return true;
   }
@@ -236,12 +231,24 @@
   for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
 }
 
-void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
+void Serializer::PutBackReference(HeapObject* object,
+                                  SerializerReference reference) {
   DCHECK(BackReferenceIsAlreadyAllocated(reference));
-  sink_->PutInt(reference.reference(), "BackRefValue");
+  sink_->PutInt(reference.back_reference(), "BackRefValue");
   hot_objects_.Add(object);
 }
 
+void Serializer::PutAttachedReference(SerializerReference reference,
+                                      HowToCode how_to_code,
+                                      WhereToPoint where_to_point) {
+  DCHECK(reference.is_attached_reference());
+  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+         (how_to_code == kFromCode && where_to_point == kInnerPointer));
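+  // Encoded as a tag byte combining kAttachedReference with the reference
+  // style, followed by the index into the attached-object list.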
+  sink_->Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
+  sink_->PutInt(reference.attached_reference_index(), "AttachedRefIndex");
+}
+
 int Serializer::PutAlignmentPrefix(HeapObject* object) {
   AllocationAlignment alignment = object->RequiredAlignment();
   if (alignment != kWordAligned) {
@@ -253,14 +260,14 @@
   return 0;
 }
 
-BackReference Serializer::AllocateLargeObject(int size) {
+SerializerReference Serializer::AllocateLargeObject(int size) {
   // Large objects are allocated one-by-one when deserializing. We do not
   // have to keep track of multiple chunks.
   large_objects_total_size_ += size;
-  return BackReference::LargeObjectReference(seen_large_objects_index_++);
+  return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
 }
 
-BackReference Serializer::Allocate(AllocationSpace space, int size) {
+SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
   DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
   DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
   uint32_t new_chunk_size = pending_chunk_[space] + size;
@@ -270,14 +277,13 @@
     sink_->Put(kNextChunk, "NextChunk");
     sink_->Put(space, "NextChunkSpace");
     completed_chunks_[space].Add(pending_chunk_[space]);
-    DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
     pending_chunk_[space] = 0;
     new_chunk_size = size;
   }
   uint32_t offset = pending_chunk_[space];
   pending_chunk_[space] = new_chunk_size;
-  return BackReference::Reference(space, completed_chunks_[space].length(),
-                                  offset);
+  return SerializerReference::BackReference(
+      space, completed_chunks_[space].length(), offset);
 }
 
 void Serializer::Pad() {
@@ -320,7 +326,7 @@
         CodeNameEvent(object_->address(), sink_->Position(), code_name));
   }
 
-  BackReference back_reference;
+  SerializerReference back_reference;
   if (space == LO_SPACE) {
     sink_->Put(kNewObject + reference_representation_ + space,
                "NewLargeObject");
@@ -345,7 +351,7 @@
 #endif  // OBJECT_PRINT
 
   // Mark this object as already serialized.
-  serializer_->back_reference_map()->Add(object_, back_reference);
+  serializer_->reference_map()->Add(object_, back_reference);
 
   // Serialize the map (first word of the object).
   serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
@@ -513,15 +519,17 @@
 
   int size = object_->Size();
   Map* map = object_->map();
-  BackReference reference = serializer_->back_reference_map()->Lookup(object_);
+  SerializerReference back_reference =
+      serializer_->reference_map()->Lookup(object_);
+  DCHECK(back_reference.is_back_reference());
 
   // Serialize the rest of the object.
   CHECK_EQ(0, bytes_processed_so_far_);
   bytes_processed_so_far_ = kPointerSize;
 
   serializer_->PutAlignmentPrefix(object_);
-  sink_->Put(kNewObject + reference.space(), "deferred object");
-  serializer_->PutBackReference(object_, reference);
+  sink_->Put(kNewObject + back_reference.space(), "deferred object");
+  serializer_->PutBackReference(object_, back_reference);
   sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
 
   UnlinkWeakNextScope unlink_weak_next(object_);
diff --git a/src/snapshot/serializer.h b/src/snapshot/serializer.h
index eccbaab..f99cd72 100644
--- a/src/snapshot/serializer.h
+++ b/src/snapshot/serializer.h
@@ -128,7 +128,7 @@
 
   Isolate* isolate() const { return isolate_; }
 
-  BackReferenceMap* back_reference_map() { return &back_reference_map_; }
+  SerializerReferenceMap* reference_map() { return &reference_map_; }
   RootIndexMap* root_index_map() { return &root_index_map_; }
 
 #ifdef OBJECT_PRINT
@@ -162,7 +162,10 @@
 
   void PutSmi(Smi* smi);
 
-  void PutBackReference(HeapObject* object, BackReference reference);
+  void PutBackReference(HeapObject* object, SerializerReference reference);
+
+  void PutAttachedReference(SerializerReference reference,
+                            HowToCode how_to_code, WhereToPoint where_to_point);
 
   // Emit alignment prefix if necessary, return required padding space in bytes.
   int PutAlignmentPrefix(HeapObject* object);
@@ -178,11 +181,11 @@
     }
   }
 
-  bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
+  bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
 
   // This will return the space for an object.
-  BackReference AllocateLargeObject(int size);
-  BackReference Allocate(AllocationSpace space, int size);
+  SerializerReference AllocateLargeObject(int size);
+  SerializerReference Allocate(AllocationSpace space, int size);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_.Encode(addr);
   }
@@ -207,7 +210,7 @@
   SnapshotByteSink* sink() const { return sink_; }
 
   void QueueDeferredObject(HeapObject* obj) {
-    DCHECK(back_reference_map_.Lookup(obj).is_valid());
+    DCHECK(reference_map_.Lookup(obj).is_back_reference());
     deferred_objects_.Add(obj);
   }
 
@@ -218,7 +221,7 @@
   SnapshotByteSink* sink_;
   ExternalReferenceEncoder external_reference_encoder_;
 
-  BackReferenceMap back_reference_map_;
+  SerializerReferenceMap reference_map_;
   RootIndexMap root_index_map_;
 
   int recursion_depth_;
diff --git a/src/snapshot/snapshot-common.cc b/src/snapshot/snapshot-common.cc
index eb3bdb5..a951b0d 100644
--- a/src/snapshot/snapshot-common.cc
+++ b/src/snapshot/snapshot-common.cc
@@ -142,8 +142,9 @@
                  Page::kObjectStartOffset;
       // Add a small allowance to the code space for small scripts.
       if (space == CODE_SPACE) required += 32 * KB;
-    } else {
-      // We expect the vanilla snapshot to only require on page per space.
+    } else if (!FLAG_debug_code) {
+      // We expect the vanilla snapshot to only require one page per space,
+      // unless we are emitting debug code.
       DCHECK(!is_default_snapshot);
     }
 
diff --git a/src/snapshot/startup-serializer.cc b/src/snapshot/startup-serializer.cc
index fab01f5..c3f9b3e 100644
--- a/src/snapshot/startup-serializer.cc
+++ b/src/snapshot/startup-serializer.cc
@@ -73,8 +73,8 @@
     // Make sure that the immortal immovable root has been included in the first
     // chunk of its reserved space, so that it is deserialized onto the first
     // page of its space and stays immortal immovable.
-    BackReference ref = back_reference_map_.Lookup(obj);
-    CHECK(ref.is_valid() && ref.chunk_index() == 0);
+    SerializerReference ref = reference_map_.Lookup(obj);
+    CHECK(ref.is_back_reference() && ref.chunk_index() == 0);
   }
 }
 
diff --git a/src/startup-data-util.cc b/src/startup-data-util.cc
index e20ec21..4e0ad97 100644
--- a/src/startup-data-util.cc
+++ b/src/startup-data-util.cc
@@ -9,7 +9,6 @@
 
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
-#include "src/flags.h"
 #include "src/utils.h"
 
 
@@ -108,9 +107,7 @@
   char* natives;
   char* snapshot;
   LoadFromFiles(RelativePath(&natives, directory_path, "natives_blob.bin"),
-                RelativePath(&snapshot, directory_path,
-                             FLAG_ignition ? "snapshot_blob_ignition.bin"
-                                           : "snapshot_blob.bin"));
+                RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
   free(natives);
   free(snapshot);
 #endif  // V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/src/third_party/vtune/v8vtune.gyp b/src/third_party/vtune/v8vtune.gyp
index 6adf365..aaf521f 100644
--- a/src/third_party/vtune/v8vtune.gyp
+++ b/src/third_party/vtune/v8vtune.gyp
@@ -29,13 +29,13 @@
   'variables': {
     'v8_code': 1,
   },
-  'includes': ['../../../build/toolchain.gypi', '../../../build/features.gypi'],
+  'includes': ['../../../gypfiles/toolchain.gypi', '../../../gypfiles/features.gypi'],
   'targets': [
     {
       'target_name': 'v8_vtune',
       'type': 'static_library',
       'dependencies': [
-        '../../../tools/gyp/v8.gyp:v8',
+        '../../v8.gyp:v8',
       ],
       'sources': [
         'ittnotify_config.h',
diff --git a/src/third_party/vtune/vtune-jit.cc b/src/third_party/vtune/vtune-jit.cc
index 30f6196..0bd1954 100644
--- a/src/third_party/vtune/vtune-jit.cc
+++ b/src/third_party/vtune/vtune-jit.cc
@@ -55,26 +55,12 @@
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
+
+#include <stdlib.h>
 #include <string.h>
 
-#ifdef WIN32
-#include <hash_map>
-using namespace std;
-#else
-// To avoid GCC 4.4 compilation warning about hash_map being deprecated.
-#define OLD_DEPRECATED __DEPRECATED
-#undef __DEPRECATED
-#if defined (ANDROID)
-#include <hash_map>
-using namespace std;
-#else
-#include <ext/hash_map>
-using namespace __gnu_cxx;
-#endif
-#define __DEPRECATED OLD_DEPRECATED
-#endif
-
 #include <list>
+#include <unordered_map>
 
 #include "v8-vtune.h"
 #include "vtune-jit.h"
@@ -126,11 +112,8 @@
   }
 };
 
-#ifdef WIN32
-typedef hash_map<void*, void*> JitInfoMap;
-#else
-typedef hash_map<void*, void*, HashForCodeObject, SameCodeObjects> JitInfoMap;
-#endif
+typedef std::unordered_map<void*, void*, HashForCodeObject, SameCodeObjects>
+    JitInfoMap;
 
 static JitInfoMap* GetEntries() {
   static JitInfoMap* entries;
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index ea02d61..828a673 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -113,8 +113,7 @@
   return name == heap->nonextensible_symbol() ||
          name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
          name == heap->elements_transition_symbol() ||
-         name == heap->strict_function_transition_symbol() ||
-         name == heap->observed_symbol();
+         name == heap->strict_function_transition_symbol();
 }
 #endif
 
diff --git a/src/type-info.cc b/src/type-info.cc
index 9087576..87b727e 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -295,7 +295,7 @@
     *key_type = ELEMENT;
   } else {
     KeyedLoadICNexus nexus(feedback_vector_, slot);
-    CollectReceiverTypes<FeedbackNexus>(&nexus, receiver_types);
+    CollectReceiverTypes(&nexus, receiver_types);
     *is_string = HasOnlyStringMaps(receiver_types);
     *key_type = nexus.GetKeyType();
   }
@@ -332,21 +332,20 @@
                                               Code::Flags flags,
                                               SmallMapList* types) {
   StoreICNexus nexus(feedback_vector_, slot);
-  CollectReceiverTypes<FeedbackNexus>(&nexus, name, flags, types);
+  CollectReceiverTypes(&nexus, name, flags, types);
 }
 
-
-template <class T>
-void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<Name> name,
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackNexus* nexus,
+                                              Handle<Name> name,
                                               Code::Flags flags,
                                               SmallMapList* types) {
   if (FLAG_collect_megamorphic_maps_from_stub_cache &&
-      obj->ic_state() == MEGAMORPHIC) {
+      nexus->ic_state() == MEGAMORPHIC) {
     types->Reserve(4, zone());
     isolate()->stub_cache()->CollectMatchingMaps(
         types, name, flags, native_context_, zone());
   } else {
-    CollectReceiverTypes<T>(obj, types);
+    CollectReceiverTypes(nexus, types);
   }
 }
 
@@ -356,23 +355,22 @@
   FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
   if (kind == FeedbackVectorSlotKind::STORE_IC) {
     StoreICNexus nexus(feedback_vector_, slot);
-    CollectReceiverTypes<FeedbackNexus>(&nexus, types);
+    CollectReceiverTypes(&nexus, types);
   } else {
     DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, kind);
     KeyedStoreICNexus nexus(feedback_vector_, slot);
-    CollectReceiverTypes<FeedbackNexus>(&nexus, types);
+    CollectReceiverTypes(&nexus, types);
   }
 }
 
-
-template <class T>
-void TypeFeedbackOracle::CollectReceiverTypes(T* obj, SmallMapList* types) {
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackNexus* nexus,
+                                              SmallMapList* types) {
   MapHandleList maps;
-  if (obj->ic_state() == MONOMORPHIC) {
-    Map* map = obj->FindFirstMap();
+  if (nexus->ic_state() == MONOMORPHIC) {
+    Map* map = nexus->FindFirstMap();
     if (map != NULL) maps.Add(handle(map));
-  } else if (obj->ic_state() == POLYMORPHIC) {
-    obj->FindAllMaps(&maps);
+  } else if (nexus->ic_state() == POLYMORPHIC) {
+    nexus->FindAllMaps(&maps);
   } else {
     return;
   }
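
The CollectReceiverTypes changes above replace a function template with plain FeedbackNexus* parameters: every instantiation was already a FeedbackNexus subclass (StoreICNexus, KeyedLoadICNexus, KeyedStoreICNexus), so one concrete signature suffices. A hedged sketch of the refactor's shape, with stand-in names rather than the V8 classes:

    struct Nexus {                       // stand-in for FeedbackNexus
      virtual int ic_state() const = 0;
      virtual ~Nexus() = default;
    };
    struct StoreNexus : Nexus {          // stand-in for StoreICNexus
      int ic_state() const override { return 1; }
    };

    // Before: template <class T> void Collect(T* obj);  -- one copy per T.
    // After: a single non-template definition dispatching through the base.
    void Collect(Nexus* nexus) { (void)nexus->ic_state(); }
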
diff --git a/src/type-info.h b/src/type-info.h
index c4b0928..3c734be 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -17,7 +17,7 @@
 
 // Forward declarations.
 class SmallMapList;
-
+class FeedbackNexus;
 
 class TypeFeedbackOracle: public ZoneObject {
  public:
@@ -56,8 +56,7 @@
                           SmallMapList* receiver_types);
 
   void CollectReceiverTypes(FeedbackVectorSlot slot, SmallMapList* types);
-  template <class T>
-  void CollectReceiverTypes(T* obj, SmallMapList* types);
+  void CollectReceiverTypes(FeedbackNexus* nexus, SmallMapList* types);
 
   static bool IsRelevantFeedback(Map* map, Context* native_context) {
     Object* constructor = map->GetConstructor();
@@ -98,9 +97,8 @@
  private:
   void CollectReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
                             Code::Flags flags, SmallMapList* types);
-  template <class T>
-  void CollectReceiverTypes(T* obj, Handle<Name> name, Code::Flags flags,
-                            SmallMapList* types);
+  void CollectReceiverTypes(FeedbackNexus* nexus, Handle<Name> name,
+                            Code::Flags flags, SmallMapList* types);
 
   // Returns true if there is at least one string map and if
   // all maps are string maps.
diff --git a/src/types.cc b/src/types.cc
index 49c9418..c222861 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -192,7 +192,8 @@
              map == heap->no_interceptor_result_sentinel_map() ||
              map == heap->termination_exception_map() ||
              map == heap->arguments_marker_map() ||
-             map == heap->optimized_out_map());
+             map == heap->optimized_out_map() ||
+             map == heap->stale_register_map());
       return kInternal & kTaggedPointer;
     }
     case HEAP_NUMBER_TYPE:
@@ -202,6 +203,7 @@
     case JS_OBJECT_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
+    case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
       if (map->is_undetectable()) return kOtherUndetectable;
       return kOtherObject;
@@ -266,8 +268,6 @@
     case SIGNATURE_INFO_TYPE:
     case TYPE_SWITCH_INFO_TYPE:
     case ALLOCATION_MEMENTO_TYPE:
-    case CODE_CACHE_TYPE:
-    case POLYMORPHIC_CODE_CACHE_TYPE:
     case TYPE_FEEDBACK_INFO_TYPE:
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
     case BOX_TYPE:
diff --git a/src/typing-asm.cc b/src/typing-asm.cc
index 7482c4f..e541539 100644
--- a/src/typing-asm.cc
+++ b/src/typing-asm.cc
@@ -44,7 +44,7 @@
       root_(root),
       valid_(true),
       allow_simd_(false),
-      property_info_(NULL),
+      property_info_(nullptr),
       intish_(0),
       stdlib_types_(zone),
       stdlib_heap_types_(zone),
@@ -62,7 +62,8 @@
       in_function_(false),
       building_function_tables_(false),
       visiting_exports_(false),
-      cache_(TypeCache::Get()) {
+      cache_(TypeCache::Get()),
+      bounds_(zone) {
   InitializeAstVisitor(isolate);
   InitializeStdlib();
 }
@@ -79,16 +80,16 @@
   if (!scope->is_function_scope()) FAIL(fun, "not at function scope");
 
   ExpressionStatement* use_asm = fun->body()->first()->AsExpressionStatement();
-  if (use_asm == NULL) FAIL(fun, "missing \"use asm\"");
+  if (use_asm == nullptr) FAIL(fun, "missing \"use asm\"");
   Literal* use_asm_literal = use_asm->expression()->AsLiteral();
-  if (use_asm_literal == NULL) FAIL(fun, "missing \"use asm\"");
+  if (use_asm_literal == nullptr) FAIL(fun, "missing \"use asm\"");
   if (!use_asm_literal->raw_value()->AsString()->IsOneByteEqualTo("use asm"))
     FAIL(fun, "missing \"use asm\"");
 
   // Module parameters.
   for (int i = 0; i < scope->num_parameters(); ++i) {
     Variable* param = scope->parameter(i);
-    DCHECK(GetType(param) == NULL);
+    DCHECK(GetType(param) == nullptr);
     SetType(param, Type::None());
   }
 
@@ -96,7 +97,7 @@
 
   // Set all globals to type Any.
   VariableDeclaration* decl = scope->function();
-  if (decl != NULL) SetType(decl->proxy()->var(), Type::None());
+  if (decl != nullptr) SetType(decl->proxy()->var(), Type::None());
   RECURSE(VisitDeclarations(scope->declarations()));
 
   // Validate global variables.
@@ -105,15 +106,15 @@
   // Validate function annotations.
   for (int i = 0; i < decls->length(); ++i) {
     FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
-    if (decl != NULL) {
+    if (decl != nullptr) {
       RECURSE(VisitFunctionAnnotation(decl->fun()));
       Variable* var = decl->proxy()->var();
-      if (property_info_ != NULL) {
+      if (property_info_ != nullptr) {
         SetVariableInfo(var, property_info_);
-        property_info_ = NULL;
+        property_info_ = nullptr;
       }
       SetType(var, computed_type_);
-      DCHECK(GetType(var) != NULL);
+      DCHECK(GetType(var) != nullptr);
     }
   }
 
@@ -125,7 +126,7 @@
   // Validate function bodies.
   for (int i = 0; i < decls->length(); ++i) {
     FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
-    if (decl != NULL) {
+    if (decl != nullptr) {
       RECURSE(VisitWithExpectation(decl->fun(), Type::Any(), "UNREACHABLE"));
       if (!computed_type_->IsFunction()) {
         FAIL(decl->fun(), "function literal expected to be a function");
@@ -147,13 +148,13 @@
 void AsmTyper::VisitVariableDeclaration(VariableDeclaration* decl) {
   Variable* var = decl->proxy()->var();
   if (var->location() != VariableLocation::PARAMETER) {
-    if (GetType(var) == NULL) {
+    if (GetType(var) == nullptr) {
       SetType(var, Type::Any());
     } else {
       DCHECK(!GetType(var)->IsFunction());
     }
   }
-  DCHECK(GetType(var) != NULL);
+  DCHECK(GetType(var) != nullptr);
   intish_ = 0;
 }
 
@@ -175,14 +176,14 @@
   Type* result_type = Type::Undefined();
   if (body->length() > 0) {
     ReturnStatement* stmt = body->last()->AsReturnStatement();
-    if (stmt != NULL) {
+    if (stmt != nullptr) {
       Literal* literal = stmt->expression()->AsLiteral();
       Type* old_expected = expected_type_;
       expected_type_ = Type::Any();
       if (literal) {
         RECURSE(VisitLiteral(literal, true));
       } else {
-        RECURSE(VisitExpressionAnnotation(stmt->expression(), NULL, true));
+        RECURSE(VisitExpressionAnnotation(stmt->expression(), nullptr, true));
       }
       expected_type_ = old_expected;
       result_type = computed_type_;
@@ -197,18 +198,18 @@
     good = false;
     if (i >= body->length()) break;
     ExpressionStatement* stmt = body->at(i)->AsExpressionStatement();
-    if (stmt == NULL) break;
+    if (stmt == nullptr) break;
     Assignment* expr = stmt->expression()->AsAssignment();
-    if (expr == NULL || expr->is_compound()) break;
+    if (expr == nullptr || expr->is_compound()) break;
     VariableProxy* proxy = expr->target()->AsVariableProxy();
-    if (proxy == NULL) break;
+    if (proxy == nullptr) break;
     Variable* var = proxy->var();
     if (var->location() != VariableLocation::PARAMETER || var->index() != i)
       break;
     RECURSE(VisitExpressionAnnotation(expr->value(), var, false));
-    if (property_info_ != NULL) {
+    if (property_info_ != nullptr) {
       SetVariableInfo(var, property_info_);
-      property_info_ = NULL;
+      property_info_ = nullptr;
     }
     SetType(var, computed_type_);
     type->AsFunction()->InitParameter(i, computed_type_);
@@ -224,10 +225,10 @@
                                          bool is_return) {
   // Normal +x or x|0 annotations.
   BinaryOperation* bin = expr->AsBinaryOperation();
-  if (bin != NULL) {
-    if (var != NULL) {
+  if (bin != nullptr) {
+    if (var != nullptr) {
       VariableProxy* proxy = bin->left()->AsVariableProxy();
-      if (proxy == NULL) {
+      if (proxy == nullptr) {
         FAIL(bin->left(), "expected variable for type annotation");
       }
       if (proxy->var() != var) {
@@ -235,7 +236,7 @@
       }
     }
     Literal* right = bin->right()->AsLiteral();
-    if (right != NULL) {
+    if (right != nullptr) {
       switch (bin->op()) {
         case Token::MUL:  // We encode +x as x*1.0
           if (right->raw_value()->ContainsDot() &&
@@ -269,10 +270,10 @@
   }
 
   Call* call = expr->AsCall();
-  if (call != NULL) {
+  if (call != nullptr) {
     VariableProxy* proxy = call->expression()->AsVariableProxy();
-    if (proxy != NULL) {
-      VariableInfo* info = GetVariableInfo(proxy->var(), false);
+    if (proxy != nullptr) {
+      VariableInfo* info = GetVariableInfo(proxy->var());
       if (!info ||
           (!info->is_check_function && !info->is_constructor_function)) {
         if (allow_simd_) {
@@ -448,14 +449,14 @@
   if (!in_function_) {
     FAIL(stmt, "for statement inside module body");
   }
-  if (stmt->init() != NULL) {
+  if (stmt->init() != nullptr) {
     RECURSE(Visit(stmt->init()));
   }
-  if (stmt->cond() != NULL) {
+  if (stmt->cond() != nullptr) {
     RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
                                  "for condition expected to be integer"));
   }
-  if (stmt->next() != NULL) {
+  if (stmt->next() != nullptr) {
     RECURSE(Visit(stmt->next()));
   }
   RECURSE(Visit(stmt->body()));
@@ -494,11 +495,11 @@
   Scope* scope = expr->scope();
   DCHECK(scope->is_function_scope());
 
-  if (!expr->bounds().upper->IsFunction()) {
+  if (!bounds_.get(expr).upper->IsFunction()) {
     FAIL(expr, "invalid function literal");
   }
 
-  Type* type = expr->bounds().upper;
+  Type* type = bounds_.get(expr).upper;
   Type* save_return_type = return_type_;
   return_type_ = type->AsFunction()->Result();
   in_function_ = true;
@@ -556,40 +557,31 @@
 
 
 void AsmTyper::VisitVariableProxy(VariableProxy* expr) {
-  VisitVariableProxy(expr, false);
-}
-
-void AsmTyper::VisitVariableProxy(VariableProxy* expr, bool assignment) {
   Variable* var = expr->var();
-  VariableInfo* info = GetVariableInfo(var, false);
-  if (!assignment && !in_function_ && !building_function_tables_ &&
-      !visiting_exports_) {
+  VariableInfo* info = GetVariableInfo(var);
+  if (!in_function_ && !building_function_tables_ && !visiting_exports_) {
     if (var->location() != VariableLocation::PARAMETER || var->index() >= 3) {
       FAIL(expr, "illegal variable reference in module body");
     }
   }
-  if (info == NULL || info->type == NULL) {
+  if (info == nullptr || info->type == nullptr) {
     if (var->mode() == TEMPORARY) {
       SetType(var, Type::Any());
-      info = GetVariableInfo(var, false);
+      info = GetVariableInfo(var);
     } else {
       FAIL(expr, "unbound variable");
     }
   }
-  if (property_info_ != NULL) {
+  if (property_info_ != nullptr) {
     SetVariableInfo(var, property_info_);
-    property_info_ = NULL;
+    property_info_ = nullptr;
   }
   Type* type = Type::Intersect(info->type, expected_type_, zone());
-  if (type->Is(cache_.kAsmInt)) {
-    type = cache_.kAsmInt;
-  }
-  info->type = type;
+  if (type->Is(cache_.kAsmInt)) type = cache_.kAsmInt;
   intish_ = 0;
   IntersectResult(expr, type);
 }
 
-
 void AsmTyper::VisitLiteral(Literal* expr, bool is_return) {
   intish_ = 0;
   Handle<Object> value = expr->value();
@@ -683,13 +675,35 @@
   RECURSE(VisitWithExpectation(
       expr->value(), type, "assignment value expected to match surrounding"));
   Type* target_type = StorageType(computed_type_);
+
   if (expr->target()->IsVariableProxy()) {
+    // Assignment to a local or context variable.
+    VariableProxy* proxy = expr->target()->AsVariableProxy();
     if (intish_ != 0) {
       FAIL(expr, "intish or floatish assignment");
     }
     expected_type_ = target_type;
-    VisitVariableProxy(expr->target()->AsVariableProxy(), true);
+    Variable* var = proxy->var();
+    VariableInfo* info = GetVariableInfo(var);
+    if (info == nullptr || info->type == nullptr) {
+      if (var->mode() == TEMPORARY) {
+        SetType(var, Type::Any());
+        info = GetVariableInfo(var);
+      } else {
+        FAIL(proxy, "unbound variable");
+      }
+    }
+    if (property_info_ != nullptr) {
+      SetVariableInfo(var, property_info_);
+      property_info_ = nullptr;
+    }
+    Type* type = Type::Intersect(info->type, expected_type_, zone());
+    if (type->Is(cache_.kAsmInt)) type = cache_.kAsmInt;
+    info->type = type;
+    intish_ = 0;
+    IntersectResult(proxy, type);
   } else if (expr->target()->IsProperty()) {
+    // Assignment to a property: should be a heap assignment {H[x] = y}.
     int32_t value_intish = intish_;
     Property* property = expr->target()->AsProperty();
     RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
@@ -745,13 +759,13 @@
     }
     // TODO(bradnelson): Fix the parser and then un-comment this part
     // BinaryOperation* bin = expr->key()->AsBinaryOperation();
-    // if (bin == NULL || bin->op() != Token::BIT_AND) {
+    // if (bin == nullptr || bin->op() != Token::BIT_AND) {
     //   FAIL(expr->key(), "expected & in call");
     // }
     // RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
     //                              "array index expected to be integer"));
     // Literal* right = bin->right()->AsLiteral();
-    // if (right == NULL || right->raw_value()->ContainsDot()) {
+    // if (right == nullptr || right->raw_value()->ContainsDot()) {
     //   FAIL(right, "call mask must be integer");
     // }
     // RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
@@ -774,13 +788,13 @@
         RECURSE(Visit(expr->key()));
       } else {
         BinaryOperation* bin = expr->key()->AsBinaryOperation();
-        if (bin == NULL || bin->op() != Token::SAR) {
+        if (bin == nullptr || bin->op() != Token::SAR) {
           FAIL(expr->key(), "expected >> in heap access");
         }
         RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
                                      "array index expected to be integer"));
         Literal* right = bin->right()->AsLiteral();
-        if (right == NULL || right->raw_value()->ContainsDot()) {
+        if (right == nullptr || right->raw_value()->ContainsDot()) {
           FAIL(bin->right(), "heap access shift must be integer");
         }
         RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
@@ -790,7 +804,7 @@
           FAIL(right, "heap access shift must match element size");
         }
       }
-      expr->key()->set_bounds(Bounds(cache_.kAsmSigned));
+      bounds_.set(expr->key(), Bounds(cache_.kAsmSigned));
     }
     Type* result_type;
     if (type->Is(cache_.kAsmIntArrayElement)) {
@@ -830,18 +844,18 @@
 
 bool AsmTyper::IsStdlibObject(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy == NULL) {
+  if (proxy == nullptr) {
     return false;
   }
   Variable* var = proxy->var();
-  VariableInfo* info = GetVariableInfo(var, false);
+  VariableInfo* info = GetVariableInfo(var);
   if (info) {
     if (info->standard_member == kStdlib) return true;
   }
   if (var->location() != VariableLocation::PARAMETER || var->index() != 0) {
     return false;
   }
-  info = GetVariableInfo(var, true);
+  info = MakeVariableInfo(var);
   info->type = Type::Object();
   info->standard_member = kStdlib;
   return true;
@@ -851,13 +865,13 @@
 Expression* AsmTyper::GetReceiverOfPropertyAccess(Expression* expr,
                                                   const char* name) {
   Property* property = expr->AsProperty();
-  if (property == NULL) {
-    return NULL;
+  if (property == nullptr) {
+    return nullptr;
   }
   Literal* key = property->key()->AsLiteral();
-  if (key == NULL || !key->IsPropertyName() ||
+  if (key == nullptr || !key->IsPropertyName() ||
       !key->AsPropertyName()->IsUtf8EqualTo(CStrVector(name))) {
-    return NULL;
+    return nullptr;
   }
   return property->obj();
 }
@@ -904,7 +918,7 @@
     return;
   }
 
-  property_info_ = NULL;
+  property_info_ = nullptr;
 
   // Only recurse at this point so that we avoid needing
   // stdlib.Math to have a real type.
@@ -913,12 +927,12 @@
 
   // For heap view or function table access.
   if (computed_type_->IsArray()) {
-    VisitHeapAccess(expr, false, NULL);
+    VisitHeapAccess(expr, false, nullptr);
     return;
   }
 
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
-  if (proxy != NULL) {
+  if (proxy != nullptr) {
     Variable* var = proxy->var();
     if (var->location() == VariableLocation::PARAMETER && var->index() == 1) {
       // foreign.x - Function represent as () -> Any
@@ -941,7 +955,7 @@
   }
   // Handle polymorphic stdlib functions specially.
   Expression* arg0 = args->at(0);
-  Type* arg0_type = arg0->bounds().upper;
+  Type* arg0_type = bounds_.get(arg0).upper;
   switch (standard_member) {
     case kMathFround: {
       if (!arg0_type->Is(cache_.kAsmFloat) &&
@@ -970,8 +984,8 @@
         FAIL(arg0, "illegal function argument type");
       }
       if (args->length() > 1) {
-        Type* other = Type::Intersect(args->at(0)->bounds().upper,
-                                      args->at(1)->bounds().upper, zone());
+        Type* other = Type::Intersect(bounds_.get(args->at(0)).upper,
+                                      bounds_.get(args->at(1)).upper, zone());
         if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
             !other->Is(cache_.kAsmSigned)) {
           FAIL(arg0, "function arguments types don't match");
@@ -992,10 +1006,10 @@
   if (proxy) {
     standard_member = VariableAsStandardMember(proxy->var());
   }
-  if (!in_function_ && (proxy == NULL || standard_member != kMathFround)) {
+  if (!in_function_ && (proxy == nullptr || standard_member != kMathFround)) {
     FAIL(expr, "calls forbidden outside function bodies");
   }
-  if (proxy == NULL && !expr->expression()->IsProperty()) {
+  if (proxy == nullptr && !expr->expression()->IsProperty()) {
     FAIL(expr, "calls must be to bound variables or function tables");
   }
   if (computed_type_->IsFunction()) {
@@ -1018,8 +1032,8 @@
         }
       }
       intish_ = 0;
-      expr->expression()->set_bounds(
-          Bounds(Type::Function(Type::Any(), zone())));
+      bounds_.set(expr->expression(),
+                  Bounds(Type::Function(Type::Any(), zone())));
       IntersectResult(expr, expected_type);
     } else {
       if (fun_type->Arity() != args->length()) {
@@ -1071,7 +1085,7 @@
 
 
 void AsmTyper::VisitCallRuntime(CallRuntime* expr) {
-  // Allow runtime calls for now.
+  FAIL(expr, "runtime call not allowed");
 }
 
 
@@ -1185,11 +1199,12 @@
       RECURSE(VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmIntQ,
                                           cache_.kAsmSigned, true));
       if (expr->left()->IsCall() && expr->op() == Token::BIT_OR &&
-          Type::Number()->Is(expr->left()->bounds().upper)) {
+          Type::Number()->Is(bounds_.get(expr->left()).upper)) {
         // Force the return types of foreign functions.
-        expr->left()->set_bounds(Bounds(cache_.kAsmSigned));
+        bounds_.set(expr->left(), Bounds(cache_.kAsmSigned));
       }
-      if (in_function_ && !expr->left()->bounds().upper->Is(cache_.kAsmIntQ)) {
+      if (in_function_ &&
+          !bounds_.get(expr->left()).upper->Is(cache_.kAsmIntQ)) {
         FAIL(expr->left(), "intish required");
       }
       return;
@@ -1199,7 +1214,7 @@
       Literal* left = expr->left()->AsLiteral();
       if (left && left->value()->IsBoolean()) {
         if (left->ToBooleanIsTrue()) {
-          left->set_bounds(Bounds(cache_.kSingletonOne));
+          bounds_.set(left, Bounds(cache_.kSingletonOne));
           RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmIntQ,
                                        "not operator expects an integer"));
           IntersectResult(expr, cache_.kAsmSigned);
@@ -1286,13 +1301,13 @@
                  expr->right()->AsLiteral()->raw_value()->AsNumber() == 1.0) {
         // For unary +, expressed as x * 1.0
         if (expr->left()->IsCall() &&
-            Type::Number()->Is(expr->left()->bounds().upper)) {
+            Type::Number()->Is(bounds_.get(expr->left()).upper)) {
           // Force the return types of foreign functions.
-          expr->left()->set_bounds(Bounds(cache_.kAsmDouble));
-          left_type = expr->left()->bounds().upper;
+          bounds_.set(expr->left(), Bounds(cache_.kAsmDouble));
+          left_type = bounds_.get(expr->left()).upper;
         }
         if (!(expr->left()->IsProperty() &&
-              Type::Number()->Is(expr->left()->bounds().upper))) {
+              Type::Number()->Is(bounds_.get(expr->left()).upper))) {
           if (!left_type->Is(cache_.kAsmSigned) &&
               !left_type->Is(cache_.kAsmUnsigned) &&
               !left_type->Is(cache_.kAsmFixnum) &&
@@ -1310,7 +1325,7 @@
                  !expr->right()->AsLiteral()->raw_value()->ContainsDot() &&
                  expr->right()->AsLiteral()->raw_value()->AsNumber() == -1.0) {
         // For unary -, expressed as x * -1
-        expr->right()->set_bounds(Bounds(cache_.kAsmDouble));
+        bounds_.set(expr->right(), Bounds(cache_.kAsmDouble));
         IntersectResult(expr, cache_.kAsmDouble);
         return;
       } else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
@@ -1502,11 +1517,12 @@
 
 void AsmTyper::VisitLibraryAccess(ObjectTypeMap* map, Property* expr) {
   Literal* key = expr->key()->AsLiteral();
-  if (key == NULL || !key->IsPropertyName())
+  if (key == nullptr || !key->IsPropertyName())
     FAIL(expr, "invalid key used on stdlib member");
   Handle<String> name = key->AsPropertyName();
   VariableInfo* info = LibType(map, name);
-  if (info == NULL || info->type == NULL) FAIL(expr, "unknown stdlib function");
+  if (info == nullptr || info->type == nullptr)
+    FAIL(expr, "unknown stdlib function");
   SetResult(expr, info->type);
   property_info_ = info;
 }
@@ -1517,55 +1533,47 @@
   base::SmartArrayPointer<char> aname = name->ToCString();
   ObjectTypeMap::iterator i = map->find(std::string(aname.get()));
   if (i == map->end()) {
-    return NULL;
+    return nullptr;
   }
   return i->second;
 }
 
 
 void AsmTyper::SetType(Variable* variable, Type* type) {
-  VariableInfo* info = GetVariableInfo(variable, true);
+  VariableInfo* info = MakeVariableInfo(variable);
   info->type = type;
 }
 
 
 Type* AsmTyper::GetType(Variable* variable) {
-  VariableInfo* info = GetVariableInfo(variable, false);
-  if (!info) return NULL;
+  VariableInfo* info = GetVariableInfo(variable);
+  if (!info) return nullptr;
   return info->type;
 }
 
+AsmTyper::VariableInfo* AsmTyper::GetVariableInfo(Variable* variable) {
+  ZoneHashMap* map =
+      in_function_ ? &local_variable_type_ : &global_variable_type_;
+  ZoneHashMap::Entry* entry =
+      map->Lookup(variable, ComputePointerHash(variable));
+  if (!entry && in_function_) {
+    entry =
+        global_variable_type_.Lookup(variable, ComputePointerHash(variable));
+  }
+  return entry ? reinterpret_cast<VariableInfo*>(entry->value) : nullptr;
+}
 
-AsmTyper::VariableInfo* AsmTyper::GetVariableInfo(Variable* variable,
-                                                  bool setting) {
-  ZoneHashMap::Entry* entry;
-  ZoneHashMap* map;
-  if (in_function_) {
-    map = &local_variable_type_;
-  } else {
-    map = &global_variable_type_;
-  }
-  if (setting) {
-    entry = map->LookupOrInsert(variable, ComputePointerHash(variable),
-                                ZoneAllocationPolicy(zone()));
-  } else {
-    entry = map->Lookup(variable, ComputePointerHash(variable));
-    if (!entry && in_function_) {
-      entry =
-          global_variable_type_.Lookup(variable, ComputePointerHash(variable));
-    }
-  }
-  if (!entry) return NULL;
-  if (!entry->value) {
-    if (!setting) return NULL;
-    entry->value = new (zone()) VariableInfo;
-  }
+AsmTyper::VariableInfo* AsmTyper::MakeVariableInfo(Variable* variable) {
+  ZoneHashMap* map =
+      in_function_ ? &local_variable_type_ : &global_variable_type_;
+  ZoneHashMap::Entry* entry = map->LookupOrInsert(
+      variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone()));
+  if (!entry->value) entry->value = new (zone()) VariableInfo;
   return reinterpret_cast<VariableInfo*>(entry->value);
 }
 
-
 void AsmTyper::SetVariableInfo(Variable* variable, const VariableInfo* info) {
-  VariableInfo* dest = GetVariableInfo(variable, true);
+  VariableInfo* dest = MakeVariableInfo(variable);
   dest->type = info->type;
   dest->is_check_function = info->is_check_function;
   dest->is_constructor_function = info->is_constructor_function;
@@ -1575,7 +1583,7 @@
 
 AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(
     Variable* variable) {
-  VariableInfo* info = GetVariableInfo(variable, false);
+  VariableInfo* info = GetVariableInfo(variable);
   if (!info) return kNone;
   return info->standard_member;
 }
@@ -1583,14 +1591,14 @@
 
 void AsmTyper::SetResult(Expression* expr, Type* type) {
   computed_type_ = type;
-  expr->set_bounds(Bounds(computed_type_));
+  bounds_.set(expr, Bounds(computed_type_));
 }
 
 
 void AsmTyper::IntersectResult(Expression* expr, Type* type) {
   computed_type_ = type;
   Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
-  expr->set_bounds(Bounds(bounded_type));
+  bounds_.set(expr, Bounds(bounded_type));
 }
 
 
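Throughout this file, expr->set_bounds(...) / expr->bounds() become bounds_.set(expr, ...) / bounds_.get(expr): type bounds now live in an AstTypeBounds side table owned by the typer rather than on the AST nodes themselves, which is also why typing-reset.cc and typing-reset.h are deleted further down -- there are no longer on-node bounds to reset. A minimal sketch of such a side table, assuming a plain std::unordered_map rather than V8's zone-allocated map:

    #include <unordered_map>

    struct Expression;                 // AST node, no longer carrying bounds
    struct Bounds { /* lower/upper type pointers */ };

    class TypeBoundsTable {
     public:
      void set(Expression* e, Bounds b) { map_[e] = b; }
      Bounds get(Expression* e) const {
        auto it = map_.find(e);
        return it != map_.end() ? it->second : Bounds();
      }

     private:
      std::unordered_map<Expression*, Bounds> map_;
    };
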
diff --git a/src/typing-asm.h b/src/typing-asm.h
index c7984b2..09eacaa 100644
--- a/src/typing-asm.h
+++ b/src/typing-asm.h
@@ -6,6 +6,7 @@
 #define V8_TYPING_ASM_H_
 
 #include "src/allocation.h"
+#include "src/ast/ast-type-bounds.h"
 #include "src/ast/ast.h"
 #include "src/effects.h"
 #include "src/type-info.h"
@@ -24,6 +25,7 @@
   bool Validate();
   void set_allow_simd(bool simd) { allow_simd_ = simd; }
   const char* error_message() { return error_message_; }
+  const AstTypeBounds* bounds() { return &bounds_; }
 
   enum StandardMember {
     kNone = 0,
@@ -117,6 +119,8 @@
 
   TypeCache const& cache_;
 
+  AstTypeBounds bounds_;
+
   static const int kErrorMessageLimit = 100;
   char error_message_[kErrorMessageLimit];
 
@@ -151,7 +155,8 @@
 
   void SetType(Variable* variable, Type* type);
   Type* GetType(Variable* variable);
-  VariableInfo* GetVariableInfo(Variable* variable, bool setting);
+  VariableInfo* GetVariableInfo(Variable* variable);
+  VariableInfo* MakeVariableInfo(Variable* variable);
   void SetVariableInfo(Variable* variable, const VariableInfo* info);
 
   VariableInfo* LibType(ObjectTypeMap* map, Handle<String> name);
@@ -165,8 +170,6 @@
 
   void VisitLiteral(Literal* expr, bool is_return);
 
-  void VisitVariableProxy(VariableProxy* expr, bool assignment);
-
   void VisitIntegerBitwiseOperator(BinaryOperation* expr, Type* left_expected,
                                    Type* right_expected, Type* result_type,
                                    bool conversion);
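
The GetVariableInfo(variable, bool setting) flag parameter is split into two single-purpose functions: GetVariableInfo only looks up (falling back from the local map to the global map) and may return nullptr, while MakeVariableInfo inserts on a miss and never returns nullptr. A sketch of the split, assuming an ordinary hash map in place of ZoneHashMap:

    #include <unordered_map>

    struct VariableInfo { /* type, standard_member, flags */ };

    class VariableTable {
     public:
      // Pure lookup; never allocates, may return nullptr.
      VariableInfo* Get(void* variable) {
        auto it = map_.find(variable);
        return it == map_.end() ? nullptr : &it->second;
      }
      // Lookup-or-insert; always returns a valid entry.
      VariableInfo* Make(void* variable) { return &map_[variable]; }

     private:
      std::unordered_map<void*, VariableInfo> map_;
    };
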
diff --git a/src/typing-reset.cc b/src/typing-reset.cc
deleted file mode 100644
index c22f7a9..0000000
--- a/src/typing-reset.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/typing-reset.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-TypingReseter::TypingReseter(Isolate* isolate, FunctionLiteral* root)
-    : AstExpressionVisitor(isolate, root) {}
-
-
-void TypingReseter::VisitExpression(Expression* expression) {
-  expression->set_bounds(Bounds::Unbounded());
-}
-}  // namespace internal
-}  // namespace v8
diff --git a/src/typing-reset.h b/src/typing-reset.h
deleted file mode 100644
index 3e1969d..0000000
--- a/src/typing-reset.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPING_RESET_H_
-#define V8_TYPING_RESET_H_
-
-#include "src/ast/ast-expression-visitor.h"
-
-namespace v8 {
-namespace internal {
-
-// A Visitor over a CompilationInfo's AST that resets
-// typing bounds back to their default.
-
-class TypingReseter : public AstExpressionVisitor {
- public:
-  TypingReseter(Isolate* isolate, FunctionLiteral* root);
-
- protected:
-  void VisitExpression(Expression* expression) override;
-};
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TYPING_RESET_H_
diff --git a/src/unicode-inl.h b/src/unicode-inl.h
index b22e482..ebebfaa 100644
--- a/src/unicode-inl.h
+++ b/src/unicode-inl.h
@@ -137,6 +137,12 @@
   }
 }
 
+bool Utf8::IsValidCharacter(uchar c) {
+  return c < 0xD800u || (c >= 0xE000u && c < 0xFDD0u) ||
+         (c > 0xFDEFu && c <= 0x10FFFFu && (c & 0xFFFEu) != 0xFFFEu &&
+          c != kBadChar);
+}
+
 }  // namespace unibrow
 
 #endif  // V8_UNICODE_INL_H_
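
The new Utf8::IsValidCharacter packs several Unicode exclusions into one boolean expression: it rejects surrogate halves, the U+FDD0..U+FDEF non-character block, anything beyond U+10FFFF, code points whose low 16 bits are 0xFFFE or 0xFFFF, and kBadChar (0xFFFD, the decoder's replacement marker). An equivalent, more explicit restatement for reference:

    #include <cstdint>

    bool IsValidCodePoint(uint32_t c) {
      if (c >= 0xD800u && c < 0xE000u) return false;   // surrogate halves
      if (c >= 0xFDD0u && c <= 0xFDEFu) return false;  // non-character block
      if (c > 0x10FFFFu) return false;                 // beyond Unicode range
      if ((c & 0xFFFEu) == 0xFFFEu) return false;      // U+xxFFFE / U+xxFFFF
      if (c == 0xFFFDu) return false;                  // kBadChar (decode error)
      return true;
    }
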
diff --git a/src/unicode.cc b/src/unicode.cc
index de5e360..db98be8 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -305,6 +305,20 @@
          0x03C82080;
 }
 
+bool Utf8::Validate(const byte* bytes, size_t length) {
+  size_t cursor = 0;
+
+  // Performance optimization: Skip over single-byte values first.
+  while (cursor < length && bytes[cursor] <= kMaxOneByteChar) {
+    ++cursor;
+  }
+
+  while (cursor < length) {
+    uchar c = ValueOf(bytes + cursor, length - cursor, &cursor);
+    if (!IsValidCharacter(c)) return false;
+  }
+  return true;
+}
 
 // Uppercase:            point.category == 'Lu'
 
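Utf8::Validate's shape is worth noting: a branch-cheap scan over the leading ASCII run, then per-sequence decoding and checking for the rest. A structure-only sketch, where Decode and IsValidCodePoint are declared stand-ins for V8's Utf8::ValueOf and Utf8::IsValidCharacter:

    #include <cstddef>
    #include <cstdint>

    uint32_t Decode(const uint8_t* bytes, size_t length, size_t* cursor);
    bool IsValidCodePoint(uint32_t c);

    bool Validate(const uint8_t* bytes, size_t length) {
      size_t cursor = 0;
      // Fast path: single-byte (ASCII) values are always valid code points.
      while (cursor < length && bytes[cursor] < 0x80) ++cursor;
      // Slow path: decode each remaining sequence and vet the code point.
      while (cursor < length) {
        uint32_t c = Decode(bytes + cursor, length - cursor, &cursor);
        if (!IsValidCodePoint(c)) return false;
      }
      return true;
    }
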
diff --git a/src/unicode.h b/src/unicode.h
index 7471a63..35717bc 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -155,6 +155,11 @@
   // UTF-8.
   static const unsigned kMax16BitCodeUnitSize  = 3;
   static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
+
+  // Excludes surrogates and non-characters from the set of valid code points.
+  static inline bool IsValidCharacter(uchar c);
+
+  static bool Validate(const byte* str, size_t length);
 };
 
 struct Uppercase {
diff --git a/src/uri.cc b/src/uri.cc
new file mode 100644
index 0000000..c459be5
--- /dev/null
+++ b/src/uri.cc
@@ -0,0 +1,135 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/uri.h"
+
+#include "src/char-predicates-inl.h"
+#include "src/handles.h"
+#include "src/isolate-inl.h"
+#include "src/list.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {  // anonymous namespace for EncodeURI helper functions
+bool IsUnescapePredicateInUriComponent(uc16 c) {
+  if (IsAlphaNumeric(c)) {
+    return true;
+  }
+
+  switch (c) {
+    case '!':
+    case '\'':
+    case '(':
+    case ')':
+    case '*':
+    case '-':
+    case '.':
+    case '_':
+    case '~':
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool IsUriSeparator(uc16 c) {
+  switch (c) {
+    case '#':
+    case ':':
+    case ';':
+    case '/':
+    case '?':
+    case '$':
+    case '&':
+    case '+':
+    case ',':
+    case '@':
+    case '=':
+      return true;
+    default:
+      return false;
+  }
+}
+
+void AddHexEncodedToBuffer(uint8_t octet, List<uint8_t>* buffer) {
+  buffer->Add('%');
+  buffer->Add(HexCharOfValue(octet >> 4));
+  buffer->Add(HexCharOfValue(octet & 0x0F));
+}
+
+void EncodeSingle(uc16 c, List<uint8_t>* buffer) {
+  uint8_t x = (c >> 12) & 0xF;
+  uint8_t y = (c >> 6) & 63;
+  uint8_t z = c & 63;
+  if (c <= 0x007F) {
+    AddHexEncodedToBuffer(c, buffer);
+  } else if (c <= 0x07FF) {
+    AddHexEncodedToBuffer(y + 192, buffer);
+    AddHexEncodedToBuffer(z + 128, buffer);
+  } else {
+    AddHexEncodedToBuffer(x + 224, buffer);
+    AddHexEncodedToBuffer(y + 128, buffer);
+    AddHexEncodedToBuffer(z + 128, buffer);
+  }
+}
+
+void EncodePair(uc16 cc1, uc16 cc2, List<uint8_t>* buffer) {
+  uint8_t u = ((cc1 >> 6) & 0xF) + 1;
+  uint8_t w = (cc1 >> 2) & 0xF;
+  uint8_t x = cc1 & 3;
+  uint8_t y = (cc2 >> 6) & 0xF;
+  uint8_t z = cc2 & 63;
+  AddHexEncodedToBuffer((u >> 2) + 240, buffer);
+  AddHexEncodedToBuffer((((u & 3) << 4) | w) + 128, buffer);
+  AddHexEncodedToBuffer(((x << 4) | y) + 128, buffer);
+  AddHexEncodedToBuffer(z + 128, buffer);
+}
+
+}  // anonymous namespace
+
+Object* Uri::Encode(Isolate* isolate, Handle<String> uri, bool is_uri) {
+  uri = String::Flatten(uri);
+  int uri_length = uri->length();
+  List<uint8_t> buffer(uri_length);
+
+  {
+    DisallowHeapAllocation no_gc;
+    String::FlatContent uri_content = uri->GetFlatContent();
+
+    for (int k = 0; k < uri_length; k++) {
+      uc16 cc1 = uri_content.Get(k);
+      if (unibrow::Utf16::IsLeadSurrogate(cc1)) {
+        k++;
+        if (k < uri_length) {
+          uc16 cc2 = uri->Get(k);
+          if (unibrow::Utf16::IsTrailSurrogate(cc2)) {
+            EncodePair(cc1, cc2, &buffer);
+            continue;
+          }
+        }
+      } else if (!unibrow::Utf16::IsTrailSurrogate(cc1)) {
+        if (IsUnescapePredicateInUriComponent(cc1) ||
+            (is_uri && IsUriSeparator(cc1))) {
+          buffer.Add(cc1);
+        } else {
+          EncodeSingle(cc1, &buffer);
+        }
+        continue;
+      }
+
+      AllowHeapAllocation allocate_error_and_return;
+      THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewURIError());
+    }
+  }
+
+  Handle<String> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      isolate->factory()->NewStringFromOneByte(buffer.ToConstVector()));
+  return *result;
+}
+
+}  // namespace internal
+}  // namespace v8
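
In the new uri.cc, EncodeSingle writes the UTF-8 bytes of a BMP code unit as %XX escapes (one to three bytes, via the x/y/z bit fields), and EncodePair merges a surrogate pair into the corresponding four-byte sequence. As a worked check of the three-byte branch, take U+20AC ('€'): x = (c >> 12) & 0xF = 0x2, y = (c >> 6) & 63 = 0x02, z = c & 63 = 0x2C, so the escapes are %E2%82%AC (x + 224, y + 128, z + 128) -- the standard UTF-8 encoding. A runnable check of that arithmetic:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint16_t c = 0x20AC;                    // '€': three-byte branch
      unsigned b1 = ((c >> 12) & 0xF) + 224;  // 0xE2
      unsigned b2 = ((c >> 6) & 63) + 128;    // 0x82
      unsigned b3 = (c & 63) + 128;           // 0xAC
      std::printf("%%%02X%%%02X%%%02X\n", b1, b2, b3);  // prints %E2%82%AC
      return 0;
    }
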
diff --git a/src/uri.h b/src/uri.h
new file mode 100644
index 0000000..e41e8a2
--- /dev/null
+++ b/src/uri.h
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_URI_H_
+#define V8_URI_H_
+
+#include "src/allocation.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class Uri : public AllStatic {
+ public:
+  static Object* EncodeUri(Isolate* isolate, Handle<String> uri) {
+    return Encode(isolate, uri, true);
+  }
+
+  static Object* EncodeUriComponent(Isolate* isolate,
+                                    Handle<String> component) {
+    return Encode(isolate, component, false);
+  }
+
+  // DecodeUri
+  // DecodeUriComponent
+  // escape
+  // unescape
+
+ private:
+  static Object* Encode(Isolate* isolate, Handle<String> uri, bool is_uri);
+  // decode
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_URI_H_
diff --git a/src/utils.h b/src/utils.h
index 44865ed..9a60141 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -13,6 +13,7 @@
 #include "include/v8.h"
 #include "src/allocation.h"
 #include "src/base/bits.h"
+#include "src/base/compiler-specific.h"
 #include "src/base/logging.h"
 #include "src/base/macros.h"
 #include "src/base/platform/platform.h"
@@ -36,6 +37,11 @@
   return -1;
 }
 
+inline char HexCharOfValue(int value) {
+  DCHECK(0 <= value && value < 16);
+  if (value < 10) return value + '0';
+  return value - 10 + 'A';
+}
 
 inline int BoolToInt(bool b) { return b ? 1 : 0; }
 
@@ -932,42 +938,21 @@
 // ----------------------------------------------------------------------------
 // I/O support.
 
-#if __GNUC__ >= 4
-// On gcc we can ask the compiler to check the types of %d-style format
-// specifiers and their associated arguments.  TODO(erikcorry) fix this
-// so it works on MacOSX.
-#if defined(__MACH__) && defined(__APPLE__)
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#define PRINTF_METHOD_CHECKING
-#define FPRINTF_METHOD_CHECKING
-#else  // MacOsX.
-#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
-#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
-#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3)))
-#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4)))
-#endif
-#else
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#define PRINTF_METHOD_CHECKING
-#define FPRINTF_METHOD_CHECKING
-#endif
-
 // Our version of printf().
-void PRINTF_CHECKING PrintF(const char* format, ...);
-void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
+void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
+void PRINTF_FORMAT(2, 3) PrintF(FILE* out, const char* format, ...);
 
 // Prepends the current process ID to the output.
-void PRINTF_CHECKING PrintPID(const char* format, ...);
+void PRINTF_FORMAT(1, 2) PrintPID(const char* format, ...);
 
 // Prepends the current process ID and given isolate pointer to the output.
-void PrintIsolate(void* isolate, const char* format, ...);
+void PRINTF_FORMAT(2, 3) PrintIsolate(void* isolate, const char* format, ...);
 
 // Safe formatting print. Ensures that str is always null-terminated.
 // Returns the number of chars written, or -1 if output was truncated.
-int FPRINTF_CHECKING SNPrintF(Vector<char> str, const char* format, ...);
-int VSNPrintF(Vector<char> str, const char* format, va_list args);
+int PRINTF_FORMAT(2, 3) SNPrintF(Vector<char> str, const char* format, ...);
+int PRINTF_FORMAT(2, 0)
+    VSNPrintF(Vector<char> str, const char* format, va_list args);
 
 void StrNCpy(Vector<char> dest, const char* src, size_t n);
 
@@ -1470,10 +1455,11 @@
   StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
 
   // Add formatted contents to the builder just like printf().
-  void AddFormatted(const char* format, ...);
+  void PRINTF_FORMAT(2, 3) AddFormatted(const char* format, ...);
 
   // Add formatted contents like printf based on a va_list.
-  void AddFormattedList(const char* format, va_list list);
+  void PRINTF_FORMAT(2, 0) AddFormattedList(const char* format, va_list list);
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
 };
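
The ad-hoc PRINTF_CHECKING macro family is replaced by PRINTF_FORMAT(string_index, first_to_check) from src/base/compiler-specific.h, which maps onto the GCC/Clang format attribute. A sketch of the idea (the #define below is illustrative, not V8's exact definition):

    #include <cstdarg>

    #if defined(__GNUC__)
    #define PRINTF_FORMAT(f, a) __attribute__((format(printf, f, a)))
    #else
    #define PRINTF_FORMAT(f, a)
    #endif

    // Argument 1 is the format string; checking starts at argument 2.
    void PRINTF_FORMAT(1, 2) Log(const char* format, ...);

    // A zero "first_to_check" marks va_list-style functions, whose variadic
    // arguments cannot be type-checked at the call site.
    void PRINTF_FORMAT(2, 0) LogV(int level, const char* format, va_list args);
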
diff --git a/src/v8.gyp b/src/v8.gyp
new file mode 100644
index 0000000..ef0c562
--- /dev/null
+++ b/src/v8.gyp
@@ -0,0 +1,2251 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'variables': {
+    'icu_use_data_file_flag%': 0,
+    'v8_code': 1,
+    'v8_random_seed%': 314159265,
+    'v8_vector_stores%': 0,
+    'embed_script%': "",
+    'warmup_script%': "",
+    'v8_extra_library_files%': [],
+    'v8_experimental_extra_library_files%': [],
+    'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+  },
+  'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'v8',
+      'dependencies_traverse': 1,
+      'dependencies': ['v8_maybe_snapshot'],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+        ['component=="shared_library"', {
+          'type': '<(component)',
+          'sources': [
+            # Note: on non-Windows we still build this file so that gyp
+            # has some sources to link into the component.
+            'v8dll-main.cc',
+          ],
+          'include_dirs': [
+            '..',
+          ],
+          'defines': [
+            'V8_SHARED',
+            'BUILDING_V8_SHARED',
+          ],
+          'direct_dependent_settings': {
+            'defines': [
+              'V8_SHARED',
+              'USING_V8_SHARED',
+            ],
+          },
+          'target_conditions': [
+            ['OS=="android" and _toolset=="target"', {
+              'libraries': [
+                '-llog',
+              ],
+              'include_dirs': [
+                'src/common/android/include',
+              ],
+            }],
+          ],
+          'conditions': [
+            ['OS=="mac"', {
+              'xcode_settings': {
+                'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
+              },
+            }],
+            ['soname_version!=""', {
+              'product_extension': 'so.<(soname_version)',
+            }],
+          ],
+        },
+        {
+          'type': 'none',
+        }],
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '../include',
+        ],
+      },
+    },
+    {
+      # This rule delegates to either v8_snapshot, v8_nosnapshot, or
+      # v8_external_snapshot, depending on the current variables.
+      # The intention is to make the 'calling' rules a bit simpler.
+      'target_name': 'v8_maybe_snapshot',
+      'type': 'none',
+      'conditions': [
+        ['v8_use_snapshot!="true"', {
+          # The dependency on v8_base should come from a transitive
+          # dependency; however, the Android toolchain requires libv8_base.a
+          # to appear before libv8_snapshot.a, so it's listed explicitly.
+          'dependencies': ['v8_base', 'v8_nosnapshot'],
+        }],
+        ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
+          # The dependency on v8_base should come from a transitive
+          # dependency; however, the Android toolchain requires libv8_base.a
+          # to appear before libv8_snapshot.a, so it's listed explicitly.
+          'dependencies': ['v8_base', 'v8_snapshot'],
+        }],
+        ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
+          'dependencies': ['v8_base', 'v8_external_snapshot'],
+          'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
+        }],
+        ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
+          'dependencies': ['v8_base', 'v8_external_snapshot'],
+          'target_conditions': [
+            ['_toolset=="host"', {
+              'inputs': [
+                '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+              ],
+            }, {
+              'inputs': [
+                '<(PRODUCT_DIR)/snapshot_blob.bin',
+              ],
+            }],
+          ],
+        }],
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ]
+    },
+    {
+      'target_name': 'v8_snapshot',
+      'type': 'static_library',
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+          'dependencies': [
+            'mksnapshot#host',
+            'js2c#host',
+          ],
+        }, {
+          'toolsets': ['target'],
+          'dependencies': [
+            'mksnapshot',
+            'js2c',
+          ],
+        }],
+        ['component=="shared_library"', {
+          'defines': [
+            'V8_SHARED',
+            'BUILDING_V8_SHARED',
+          ],
+          'direct_dependent_settings': {
+            'defines': [
+              'V8_SHARED',
+              'USING_V8_SHARED',
+            ],
+          },
+        }],
+      ],
+      'dependencies': [
+        'v8_base',
+      ],
+      'include_dirs+': [
+        '..',
+      ],
+      'sources': [
+        '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+        '<(INTERMEDIATE_DIR)/snapshot.cc',
+      ],
+      'actions': [
+        {
+          'action_name': 'run_mksnapshot',
+          'inputs': [
+            '<(mksnapshot_exec)',
+          ],
+          'conditions': [
+            ['embed_script!=""', {
+              'inputs': [
+                '<(embed_script)',
+              ],
+            }],
+            ['warmup_script!=""', {
+              'inputs': [
+                '<(warmup_script)',
+              ],
+            }],
+          ],
+          'outputs': [
+            '<(INTERMEDIATE_DIR)/snapshot.cc',
+          ],
+          'variables': {
+            'mksnapshot_flags': [],
+            'conditions': [
+              ['v8_random_seed!=0', {
+                'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+              }],
+              ['v8_vector_stores!=0', {
+                'mksnapshot_flags': ['--vector-stores'],
+              }],
+            ],
+          },
+          'action': [
+            '<(mksnapshot_exec)',
+            '<@(mksnapshot_flags)',
+            '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
+            '<(embed_script)',
+            '<(warmup_script)',
+          ],
+        },
+      ],
+    },
+    {
+      'target_name': 'v8_nosnapshot',
+      'type': 'static_library',
+      'dependencies': [
+        'v8_base',
+      ],
+      'include_dirs+': [
+        '..',
+      ],
+      'sources': [
+        '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+        'snapshot/snapshot-empty.cc',
+      ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+          'dependencies': ['js2c#host'],
+        }, {
+          'toolsets': ['target'],
+          'dependencies': ['js2c'],
+        }],
+        ['component=="shared_library"', {
+          'defines': [
+            'BUILDING_V8_SHARED',
+            'V8_SHARED',
+          ],
+        }],
+      ]
+    },
+    {
+      'target_name': 'v8_external_snapshot',
+      'type': 'static_library',
+      'conditions': [
+        [ 'v8_use_external_startup_data==1', {
+          'conditions': [
+            ['want_separate_host_toolset==1', {
+              'toolsets': ['host', 'target'],
+              'dependencies': [
+                'mksnapshot#host',
+                'js2c#host',
+                'natives_blob',
+            ]}, {
+              'toolsets': ['target'],
+              'dependencies': [
+                'mksnapshot',
+                'js2c',
+                'natives_blob',
+              ],
+            }],
+            ['component=="shared_library"', {
+              'defines': [
+                'V8_SHARED',
+                'BUILDING_V8_SHARED',
+              ],
+              'direct_dependent_settings': {
+                'defines': [
+                  'V8_SHARED',
+                  'USING_V8_SHARED',
+                ],
+              },
+            }],
+          ],
+          'dependencies': [
+            'v8_base',
+          ],
+          'include_dirs+': [
+            '..',
+          ],
+          'sources': [
+            'snapshot/natives-external.cc',
+            'snapshot/snapshot-external.cc',
+          ],
+          'actions': [
+            {
+              'action_name': 'run_mksnapshot (external)',
+              'inputs': [
+                '<(mksnapshot_exec)',
+              ],
+              'variables': {
+                'mksnapshot_flags': [],
+                'conditions': [
+                  ['v8_random_seed!=0', {
+                    'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+                  }],
+                  ['v8_vector_stores!=0', {
+                    'mksnapshot_flags': ['--vector-stores'],
+                  }],
+                ],
+              },
+              'conditions': [
+                ['embed_script!=""', {
+                  'inputs': [
+                    '<(embed_script)',
+                  ],
+                }],
+                ['warmup_script!=""', {
+                  'inputs': [
+                    '<(warmup_script)',
+                  ],
+                }],
+                ['want_separate_host_toolset==1', {
+                  'target_conditions': [
+                    ['_toolset=="host"', {
+                      'outputs': [
+                        '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+                      ],
+                      'action': [
+                        '<(mksnapshot_exec)',
+                        '<@(mksnapshot_flags)',
+                        '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+                        '<(embed_script)',
+                        '<(warmup_script)',
+                      ],
+                    }, {
+                      'outputs': [
+                        '<(PRODUCT_DIR)/snapshot_blob.bin',
+                      ],
+                      'action': [
+                        '<(mksnapshot_exec)',
+                        '<@(mksnapshot_flags)',
+                        '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+                        '<(embed_script)',
+                        '<(warmup_script)',
+                      ],
+                    }],
+                  ],
+                }, {
+                  'outputs': [
+                    '<(PRODUCT_DIR)/snapshot_blob.bin',
+                  ],
+                  'action': [
+                    '<(mksnapshot_exec)',
+                    '<@(mksnapshot_flags)',
+                    '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+                    '<(embed_script)',
+                    '<(warmup_script)',
+                  ],
+                }],
+              ],
+            },
+          ],
+        }],
+      ],
+    },
+    {
+      'target_name': 'v8_base',
+      'type': 'static_library',
+      'dependencies': [
+        'v8_libbase',
+      ],
+      'variables': {
+        'optimize': 'max',
+      },
+      'include_dirs+': [
+        '..',
+        # To be able to find base/trace_event/common/trace_event_common.h
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        '../include/v8-debug.h',
+        '../include/v8-experimental.h',
+        '../include/v8-platform.h',
+        '../include/v8-profiler.h',
+        '../include/v8-testing.h',
+        '../include/v8-util.h',
+        '../include/v8-version.h',
+        '../include/v8.h',
+        '../include/v8config.h',
+        'accessors.cc',
+        'accessors.h',
+        'address-map.cc',
+        'address-map.h',
+        'allocation.cc',
+        'allocation.h',
+        'allocation-site-scopes.cc',
+        'allocation-site-scopes.h',
+        'api-experimental.cc',
+        'api-experimental.h',
+        'api.cc',
+        'api.h',
+        'api-arguments.cc',
+        'api-arguments.h',
+        'api-natives.cc',
+        'api-natives.h',
+        'arguments.cc',
+        'arguments.h',
+        'assembler.cc',
+        'assembler.h',
+        'assert-scope.h',
+        'assert-scope.cc',
+        'ast/ast-expression-rewriter.cc',
+        'ast/ast-expression-rewriter.h',
+        'ast/ast-expression-visitor.cc',
+        'ast/ast-expression-visitor.h',
+        'ast/ast-literal-reindexer.cc',
+        'ast/ast-literal-reindexer.h',
+        'ast/ast-numbering.cc',
+        'ast/ast-numbering.h',
+        'ast/ast-type-bounds.h',
+        'ast/ast-value-factory.cc',
+        'ast/ast-value-factory.h',
+        'ast/ast.cc',
+        'ast/ast.h',
+        'ast/modules.cc',
+        'ast/modules.h',
+        'ast/prettyprinter.cc',
+        'ast/prettyprinter.h',
+        'ast/scopeinfo.cc',
+        'ast/scopeinfo.h',
+        'ast/scopes.cc',
+        'ast/scopes.h',
+        'ast/variables.cc',
+        'ast/variables.h',
+        'background-parsing-task.cc',
+        'background-parsing-task.h',
+        'bailout-reason.cc',
+        'bailout-reason.h',
+        'basic-block-profiler.cc',
+        'basic-block-profiler.h',
+        'bignum-dtoa.cc',
+        'bignum-dtoa.h',
+        'bignum.cc',
+        'bignum.h',
+        'bit-vector.cc',
+        'bit-vector.h',
+        'bootstrapper.cc',
+        'bootstrapper.h',
+        'builtins.cc',
+        'builtins.h',
+        'cached-powers.cc',
+        'cached-powers.h',
+        'cancelable-task.cc',
+        'cancelable-task.h',
+        'char-predicates.cc',
+        'char-predicates-inl.h',
+        'char-predicates.h',
+        'checks.h',
+        'code-factory.cc',
+        'code-factory.h',
+        'code-stub-assembler.cc',
+        'code-stub-assembler.h',
+        'code-stubs.cc',
+        'code-stubs.h',
+        'code-stubs-hydrogen.cc',
+        'codegen.cc',
+        'codegen.h',
+        'collector.h',
+        'compilation-cache.cc',
+        'compilation-cache.h',
+        'compilation-dependencies.cc',
+        'compilation-dependencies.h',
+        'compilation-statistics.cc',
+        'compilation-statistics.h',
+        'compiler/access-builder.cc',
+        'compiler/access-builder.h',
+        'compiler/access-info.cc',
+        'compiler/access-info.h',
+        'compiler/all-nodes.cc',
+        'compiler/all-nodes.h',
+        'compiler/ast-graph-builder.cc',
+        'compiler/ast-graph-builder.h',
+        'compiler/ast-loop-assignment-analyzer.cc',
+        'compiler/ast-loop-assignment-analyzer.h',
+        'compiler/basic-block-instrumentor.cc',
+        'compiler/basic-block-instrumentor.h',
+        'compiler/branch-elimination.cc',
+        'compiler/branch-elimination.h',
+        'compiler/bytecode-branch-analysis.cc',
+        'compiler/bytecode-branch-analysis.h',
+        'compiler/bytecode-graph-builder.cc',
+        'compiler/bytecode-graph-builder.h',
+        'compiler/c-linkage.cc',
+        'compiler/coalesced-live-ranges.cc',
+        'compiler/coalesced-live-ranges.h',
+        'compiler/code-generator-impl.h',
+        'compiler/code-generator.cc',
+        'compiler/code-generator.h',
+        'compiler/code-assembler.cc',
+        'compiler/code-assembler.h',
+        'compiler/common-node-cache.cc',
+        'compiler/common-node-cache.h',
+        'compiler/common-operator-reducer.cc',
+        'compiler/common-operator-reducer.h',
+        'compiler/common-operator.cc',
+        'compiler/common-operator.h',
+        'compiler/control-builders.cc',
+        'compiler/control-builders.h',
+        'compiler/control-equivalence.cc',
+        'compiler/control-equivalence.h',
+        'compiler/control-flow-optimizer.cc',
+        'compiler/control-flow-optimizer.h',
+        'compiler/dead-code-elimination.cc',
+        'compiler/dead-code-elimination.h',
+        'compiler/diamond.h',
+        'compiler/effect-control-linearizer.cc',
+        'compiler/effect-control-linearizer.h',
+        'compiler/escape-analysis.cc',
+        'compiler/escape-analysis.h',
+        "compiler/escape-analysis-reducer.cc",
+        "compiler/escape-analysis-reducer.h",
+        'compiler/frame.cc',
+        'compiler/frame.h',
+        'compiler/frame-elider.cc',
+        'compiler/frame-elider.h',
+        "compiler/frame-states.cc",
+        "compiler/frame-states.h",
+        'compiler/gap-resolver.cc',
+        'compiler/gap-resolver.h',
+        'compiler/graph-reducer.cc',
+        'compiler/graph-reducer.h',
+        'compiler/graph-replay.cc',
+        'compiler/graph-replay.h',
+        'compiler/graph-trimmer.cc',
+        'compiler/graph-trimmer.h',
+        'compiler/graph-visualizer.cc',
+        'compiler/graph-visualizer.h',
+        'compiler/graph.cc',
+        'compiler/graph.h',
+        'compiler/greedy-allocator.cc',
+        'compiler/greedy-allocator.h',
+        'compiler/instruction-codes.h',
+        'compiler/instruction-selector-impl.h',
+        'compiler/instruction-selector.cc',
+        'compiler/instruction-selector.h',
+        'compiler/instruction-scheduler.cc',
+        'compiler/instruction-scheduler.h',
+        'compiler/instruction.cc',
+        'compiler/instruction.h',
+        'compiler/int64-lowering.cc',
+        'compiler/int64-lowering.h',
+        'compiler/js-builtin-reducer.cc',
+        'compiler/js-builtin-reducer.h',
+        'compiler/js-call-reducer.cc',
+        'compiler/js-call-reducer.h',
+        'compiler/js-context-specialization.cc',
+        'compiler/js-context-specialization.h',
+        'compiler/js-create-lowering.cc',
+        'compiler/js-create-lowering.h',
+        'compiler/js-frame-specialization.cc',
+        'compiler/js-frame-specialization.h',
+        'compiler/js-generic-lowering.cc',
+        'compiler/js-generic-lowering.h',
+        'compiler/js-global-object-specialization.cc',
+        'compiler/js-global-object-specialization.h',
+        'compiler/js-graph.cc',
+        'compiler/js-graph.h',
+        'compiler/js-inlining.cc',
+        'compiler/js-inlining.h',
+        'compiler/js-inlining-heuristic.cc',
+        'compiler/js-inlining-heuristic.h',
+        'compiler/js-intrinsic-lowering.cc',
+        'compiler/js-intrinsic-lowering.h',
+        'compiler/js-native-context-specialization.cc',
+        'compiler/js-native-context-specialization.h',
+        'compiler/js-operator.cc',
+        'compiler/js-operator.h',
+        'compiler/js-typed-lowering.cc',
+        'compiler/js-typed-lowering.h',
+        'compiler/jump-threading.cc',
+        'compiler/jump-threading.h',
+        'compiler/linkage.cc',
+        'compiler/linkage.h',
+        'compiler/liveness-analyzer.cc',
+        'compiler/liveness-analyzer.h',
+        'compiler/live-range-separator.cc',
+        'compiler/live-range-separator.h',
+        'compiler/load-elimination.cc',
+        'compiler/load-elimination.h',
+        'compiler/loop-analysis.cc',
+        'compiler/loop-analysis.h',
+        'compiler/loop-peeling.cc',
+        'compiler/loop-peeling.h',
+        'compiler/machine-operator-reducer.cc',
+        'compiler/machine-operator-reducer.h',
+        'compiler/machine-operator.cc',
+        'compiler/machine-operator.h',
+        'compiler/memory-optimizer.cc',
+        'compiler/memory-optimizer.h',
+        'compiler/move-optimizer.cc',
+        'compiler/move-optimizer.h',
+        'compiler/node-aux-data.h',
+        'compiler/node-cache.cc',
+        'compiler/node-cache.h',
+        'compiler/node-marker.cc',
+        'compiler/node-marker.h',
+        'compiler/node-matchers.cc',
+        'compiler/node-matchers.h',
+        'compiler/node-properties.cc',
+        'compiler/node-properties.h',
+        'compiler/node.cc',
+        'compiler/node.h',
+        'compiler/opcodes.cc',
+        'compiler/opcodes.h',
+        'compiler/operator-properties.cc',
+        'compiler/operator-properties.h',
+        'compiler/operator.cc',
+        'compiler/operator.h',
+        'compiler/osr.cc',
+        'compiler/osr.h',
+        'compiler/pipeline.cc',
+        'compiler/pipeline.h',
+        'compiler/pipeline-statistics.cc',
+        'compiler/pipeline-statistics.h',
+        'compiler/raw-machine-assembler.cc',
+        'compiler/raw-machine-assembler.h',
+        'compiler/register-allocator.cc',
+        'compiler/register-allocator.h',
+        'compiler/register-allocator-verifier.cc',
+        'compiler/register-allocator-verifier.h',
+        'compiler/representation-change.cc',
+        'compiler/representation-change.h',
+        'compiler/schedule.cc',
+        'compiler/schedule.h',
+        'compiler/scheduler.cc',
+        'compiler/scheduler.h',
+        'compiler/select-lowering.cc',
+        'compiler/select-lowering.h',
+        'compiler/simplified-lowering.cc',
+        'compiler/simplified-lowering.h',
+        'compiler/simplified-operator-reducer.cc',
+        'compiler/simplified-operator-reducer.h',
+        'compiler/simplified-operator.cc',
+        'compiler/simplified-operator.h',
+        'compiler/source-position.cc',
+        'compiler/source-position.h',
+        'compiler/state-values-utils.cc',
+        'compiler/state-values-utils.h',
+        'compiler/tail-call-optimization.cc',
+        'compiler/tail-call-optimization.h',
+        'compiler/type-hint-analyzer.cc',
+        'compiler/type-hint-analyzer.h',
+        'compiler/type-hints.cc',
+        'compiler/type-hints.h',
+        'compiler/typer.cc',
+        'compiler/typer.h',
+        'compiler/value-numbering-reducer.cc',
+        'compiler/value-numbering-reducer.h',
+        'compiler/verifier.cc',
+        'compiler/verifier.h',
+        'compiler/wasm-compiler.cc',
+        'compiler/wasm-compiler.h',
+        'compiler/wasm-linkage.cc',
+        'compiler/zone-pool.cc',
+        'compiler/zone-pool.h',
+        'compiler.cc',
+        'compiler.h',
+        'context-measure.cc',
+        'context-measure.h',
+        'contexts-inl.h',
+        'contexts.cc',
+        'contexts.h',
+        'conversions-inl.h',
+        'conversions.cc',
+        'conversions.h',
+        'counters-inl.h',
+        'counters.cc',
+        'counters.h',
+        'crankshaft/compilation-phase.cc',
+        'crankshaft/compilation-phase.h',
+        'crankshaft/hydrogen-alias-analysis.h',
+        'crankshaft/hydrogen-bce.cc',
+        'crankshaft/hydrogen-bce.h',
+        'crankshaft/hydrogen-canonicalize.cc',
+        'crankshaft/hydrogen-canonicalize.h',
+        'crankshaft/hydrogen-check-elimination.cc',
+        'crankshaft/hydrogen-check-elimination.h',
+        'crankshaft/hydrogen-dce.cc',
+        'crankshaft/hydrogen-dce.h',
+        'crankshaft/hydrogen-dehoist.cc',
+        'crankshaft/hydrogen-dehoist.h',
+        'crankshaft/hydrogen-environment-liveness.cc',
+        'crankshaft/hydrogen-environment-liveness.h',
+        'crankshaft/hydrogen-escape-analysis.cc',
+        'crankshaft/hydrogen-escape-analysis.h',
+        'crankshaft/hydrogen-flow-engine.h',
+        'crankshaft/hydrogen-gvn.cc',
+        'crankshaft/hydrogen-gvn.h',
+        'crankshaft/hydrogen-infer-representation.cc',
+        'crankshaft/hydrogen-infer-representation.h',
+        'crankshaft/hydrogen-infer-types.cc',
+        'crankshaft/hydrogen-infer-types.h',
+        'crankshaft/hydrogen-instructions.cc',
+        'crankshaft/hydrogen-instructions.h',
+        'crankshaft/hydrogen-load-elimination.cc',
+        'crankshaft/hydrogen-load-elimination.h',
+        'crankshaft/hydrogen-mark-deoptimize.cc',
+        'crankshaft/hydrogen-mark-deoptimize.h',
+        'crankshaft/hydrogen-mark-unreachable.cc',
+        'crankshaft/hydrogen-mark-unreachable.h',
+        'crankshaft/hydrogen-osr.cc',
+        'crankshaft/hydrogen-osr.h',
+        'crankshaft/hydrogen-range-analysis.cc',
+        'crankshaft/hydrogen-range-analysis.h',
+        'crankshaft/hydrogen-redundant-phi.cc',
+        'crankshaft/hydrogen-redundant-phi.h',
+        'crankshaft/hydrogen-removable-simulates.cc',
+        'crankshaft/hydrogen-removable-simulates.h',
+        'crankshaft/hydrogen-representation-changes.cc',
+        'crankshaft/hydrogen-representation-changes.h',
+        'crankshaft/hydrogen-sce.cc',
+        'crankshaft/hydrogen-sce.h',
+        'crankshaft/hydrogen-store-elimination.cc',
+        'crankshaft/hydrogen-store-elimination.h',
+        'crankshaft/hydrogen-types.cc',
+        'crankshaft/hydrogen-types.h',
+        'crankshaft/hydrogen-uint32-analysis.cc',
+        'crankshaft/hydrogen-uint32-analysis.h',
+        'crankshaft/hydrogen.cc',
+        'crankshaft/hydrogen.h',
+        'crankshaft/lithium-allocator-inl.h',
+        'crankshaft/lithium-allocator.cc',
+        'crankshaft/lithium-allocator.h',
+        'crankshaft/lithium-codegen.cc',
+        'crankshaft/lithium-codegen.h',
+        'crankshaft/lithium.cc',
+        'crankshaft/lithium.h',
+        'crankshaft/lithium-inl.h',
+        'crankshaft/typing.cc',
+        'crankshaft/typing.h',
+        'crankshaft/unique.h',
+        'date.cc',
+        'date.h',
+        'dateparser-inl.h',
+        'dateparser.cc',
+        'dateparser.h',
+        'debug/debug-evaluate.cc',
+        'debug/debug-evaluate.h',
+        'debug/debug-frames.cc',
+        'debug/debug-frames.h',
+        'debug/debug-scopes.cc',
+        'debug/debug-scopes.h',
+        'debug/debug.cc',
+        'debug/debug.h',
+        'debug/liveedit.cc',
+        'debug/liveedit.h',
+        'deoptimizer.cc',
+        'deoptimizer.h',
+        'disasm.h',
+        'disassembler.cc',
+        'disassembler.h',
+        'diy-fp.cc',
+        'diy-fp.h',
+        'double.h',
+        'dtoa.cc',
+        'dtoa.h',
+        'effects.h',
+        'elements-kind.cc',
+        'elements-kind.h',
+        'elements.cc',
+        'elements.h',
+        'execution.cc',
+        'execution.h',
+        'extensions/externalize-string-extension.cc',
+        'extensions/externalize-string-extension.h',
+        'extensions/free-buffer-extension.cc',
+        'extensions/free-buffer-extension.h',
+        'extensions/gc-extension.cc',
+        'extensions/gc-extension.h',
+        'extensions/ignition-statistics-extension.cc',
+        'extensions/ignition-statistics-extension.h',
+        'extensions/statistics-extension.cc',
+        'extensions/statistics-extension.h',
+        'extensions/trigger-failure-extension.cc',
+        'extensions/trigger-failure-extension.h',
+        'external-reference-table.cc',
+        'external-reference-table.h',
+        'factory.cc',
+        'factory.h',
+        'fast-accessor-assembler.cc',
+        'fast-accessor-assembler.h',
+        'fast-dtoa.cc',
+        'fast-dtoa.h',
+        'field-index.h',
+        'field-index-inl.h',
+        'field-type.cc',
+        'field-type.h',
+        'fixed-dtoa.cc',
+        'fixed-dtoa.h',
+        'flag-definitions.h',
+        'flags.cc',
+        'flags.h',
+        'frames-inl.h',
+        'frames.cc',
+        'frames.h',
+        'full-codegen/full-codegen.cc',
+        'full-codegen/full-codegen.h',
+        'futex-emulation.cc',
+        'futex-emulation.h',
+        'gdb-jit.cc',
+        'gdb-jit.h',
+        'global-handles.cc',
+        'global-handles.h',
+        'globals.h',
+        'handles-inl.h',
+        'handles.cc',
+        'handles.h',
+        'hashmap.h',
+        'heap-symbols.h',
+        'heap/array-buffer-tracker.cc',
+        'heap/array-buffer-tracker.h',
+        'heap/gc-idle-time-handler.cc',
+        'heap/gc-idle-time-handler.h',
+        'heap/gc-tracer.cc',
+        'heap/gc-tracer.h',
+        'heap/heap-inl.h',
+        'heap/heap.cc',
+        'heap/heap.h',
+        'heap/incremental-marking-inl.h',
+        'heap/incremental-marking-job.cc',
+        'heap/incremental-marking-job.h',
+        'heap/incremental-marking.cc',
+        'heap/incremental-marking.h',
+        'heap/mark-compact-inl.h',
+        'heap/mark-compact.cc',
+        'heap/mark-compact.h',
+        'heap/memory-reducer.cc',
+        'heap/memory-reducer.h',
+        'heap/object-stats.cc',
+        'heap/object-stats.h',
+        'heap/objects-visiting-inl.h',
+        'heap/objects-visiting.cc',
+        'heap/objects-visiting.h',
+        'heap/page-parallel-job.h',
+        'heap/remembered-set.cc',
+        'heap/remembered-set.h',
+        'heap/scavenge-job.cc',
+        'heap/scavenge-job.h',
+        'heap/scavenger-inl.h',
+        'heap/scavenger.cc',
+        'heap/scavenger.h',
+        'heap/slot-set.h',
+        'heap/spaces-inl.h',
+        'heap/spaces.cc',
+        'heap/spaces.h',
+        'heap/store-buffer.cc',
+        'heap/store-buffer.h',
+        'i18n.cc',
+        'i18n.h',
+        'icu_util.cc',
+        'icu_util.h',
+        'ic/access-compiler.cc',
+        'ic/access-compiler.h',
+        'ic/call-optimization.cc',
+        'ic/call-optimization.h',
+        'ic/handler-compiler.cc',
+        'ic/handler-compiler.h',
+        'ic/ic-inl.h',
+        'ic/ic-state.cc',
+        'ic/ic-state.h',
+        'ic/ic.cc',
+        'ic/ic.h',
+        'ic/ic-compiler.cc',
+        'ic/ic-compiler.h',
+        'ic/stub-cache.cc',
+        'ic/stub-cache.h',
+        'identity-map.cc',
+        'identity-map.h',
+        'interface-descriptors.cc',
+        'interface-descriptors.h',
+        'interpreter/bytecodes.cc',
+        'interpreter/bytecodes.h',
+        'interpreter/bytecode-array-builder.cc',
+        'interpreter/bytecode-array-builder.h',
+        'interpreter/bytecode-array-iterator.cc',
+        'interpreter/bytecode-array-iterator.h',
+        'interpreter/bytecode-array-writer.cc',
+        'interpreter/bytecode-array-writer.h',
+        'interpreter/bytecode-peephole-optimizer.cc',
+        'interpreter/bytecode-peephole-optimizer.h',
+        'interpreter/bytecode-pipeline.cc',
+        'interpreter/bytecode-pipeline.h',
+        'interpreter/bytecode-register-allocator.cc',
+        'interpreter/bytecode-register-allocator.h',
+        'interpreter/bytecode-generator.cc',
+        'interpreter/bytecode-generator.h',
+        'interpreter/bytecode-traits.h',
+        'interpreter/constant-array-builder.cc',
+        'interpreter/constant-array-builder.h',
+        'interpreter/control-flow-builders.cc',
+        'interpreter/control-flow-builders.h',
+        'interpreter/handler-table-builder.cc',
+        'interpreter/handler-table-builder.h',
+        'interpreter/interpreter.cc',
+        'interpreter/interpreter.h',
+        'interpreter/interpreter-assembler.cc',
+        'interpreter/interpreter-assembler.h',
+        'interpreter/interpreter-intrinsics.cc',
+        'interpreter/interpreter-intrinsics.h',
+        'interpreter/source-position-table.cc',
+        'interpreter/source-position-table.h',
+        'isolate-inl.h',
+        'isolate.cc',
+        'isolate.h',
+        'json-parser.h',
+        'json-stringifier.h',
+        'keys.cc',
+        'keys.h',
+        'layout-descriptor-inl.h',
+        'layout-descriptor.cc',
+        'layout-descriptor.h',
+        'list-inl.h',
+        'list.h',
+        'locked-queue-inl.h',
+        'locked-queue.h',
+        'log-inl.h',
+        'log-utils.cc',
+        'log-utils.h',
+        'log.cc',
+        'log.h',
+        'lookup.cc',
+        'lookup.h',
+        'machine-type.cc',
+        'machine-type.h',
+        'macro-assembler.h',
+        'messages.cc',
+        'messages.h',
+        'msan.h',
+        'objects-body-descriptors-inl.h',
+        'objects-body-descriptors.h',
+        'objects-debug.cc',
+        'objects-inl.h',
+        'objects-printer.cc',
+        'objects.cc',
+        'objects.h',
+        'optimizing-compile-dispatcher.cc',
+        'optimizing-compile-dispatcher.h',
+        'ostreams.cc',
+        'ostreams.h',
+        'parsing/expression-classifier.h',
+        'parsing/func-name-inferrer.cc',
+        'parsing/func-name-inferrer.h',
+        'parsing/parameter-initializer-rewriter.cc',
+        'parsing/parameter-initializer-rewriter.h',
+        'parsing/parser-base.h',
+        'parsing/parser.cc',
+        'parsing/parser.h',
+        'parsing/pattern-rewriter.cc',
+        'parsing/preparse-data-format.h',
+        'parsing/preparse-data.cc',
+        'parsing/preparse-data.h',
+        'parsing/preparser.cc',
+        'parsing/preparser.h',
+        'parsing/rewriter.cc',
+        'parsing/rewriter.h',
+        'parsing/scanner-character-streams.cc',
+        'parsing/scanner-character-streams.h',
+        'parsing/scanner.cc',
+        'parsing/scanner.h',
+        'parsing/token.cc',
+        'parsing/token.h',
+        'pending-compilation-error-handler.cc',
+        'pending-compilation-error-handler.h',
+        'perf-jit.cc',
+        'perf-jit.h',
+        'profiler/allocation-tracker.cc',
+        'profiler/allocation-tracker.h',
+        'profiler/circular-queue-inl.h',
+        'profiler/circular-queue.h',
+        'profiler/cpu-profiler-inl.h',
+        'profiler/cpu-profiler.cc',
+        'profiler/cpu-profiler.h',
+        'profiler/heap-profiler.cc',
+        'profiler/heap-profiler.h',
+        'profiler/heap-snapshot-generator-inl.h',
+        'profiler/heap-snapshot-generator.cc',
+        'profiler/heap-snapshot-generator.h',
+        'profiler/profile-generator-inl.h',
+        'profiler/profile-generator.cc',
+        'profiler/profile-generator.h',
+        'profiler/sampler.cc',
+        'profiler/sampler.h',
+        'profiler/sampling-heap-profiler.cc',
+        'profiler/sampling-heap-profiler.h',
+        'profiler/strings-storage.cc',
+        'profiler/strings-storage.h',
+        'profiler/tick-sample.cc',
+        'profiler/tick-sample.h',
+        'profiler/unbound-queue-inl.h',
+        'profiler/unbound-queue.h',
+        'property-descriptor.cc',
+        'property-descriptor.h',
+        'property-details.h',
+        'property.cc',
+        'property.h',
+        'prototype.h',
+        'regexp/bytecodes-irregexp.h',
+        'regexp/interpreter-irregexp.cc',
+        'regexp/interpreter-irregexp.h',
+        'regexp/jsregexp-inl.h',
+        'regexp/jsregexp.cc',
+        'regexp/jsregexp.h',
+        'regexp/regexp-ast.cc',
+        'regexp/regexp-ast.h',
+        'regexp/regexp-macro-assembler-irregexp-inl.h',
+        'regexp/regexp-macro-assembler-irregexp.cc',
+        'regexp/regexp-macro-assembler-irregexp.h',
+        'regexp/regexp-macro-assembler-tracer.cc',
+        'regexp/regexp-macro-assembler-tracer.h',
+        'regexp/regexp-macro-assembler.cc',
+        'regexp/regexp-macro-assembler.h',
+        'regexp/regexp-parser.cc',
+        'regexp/regexp-parser.h',
+        'regexp/regexp-stack.cc',
+        'regexp/regexp-stack.h',
+        'register-configuration.cc',
+        'register-configuration.h',
+        'runtime-profiler.cc',
+        'runtime-profiler.h',
+        'runtime/runtime-array.cc',
+        'runtime/runtime-atomics.cc',
+        'runtime/runtime-classes.cc',
+        'runtime/runtime-collections.cc',
+        'runtime/runtime-compiler.cc',
+        'runtime/runtime-date.cc',
+        'runtime/runtime-debug.cc',
+        'runtime/runtime-forin.cc',
+        'runtime/runtime-function.cc',
+        'runtime/runtime-futex.cc',
+        'runtime/runtime-generator.cc',
+        'runtime/runtime-i18n.cc',
+        'runtime/runtime-internal.cc',
+        'runtime/runtime-interpreter.cc',
+        'runtime/runtime-json.cc',
+        'runtime/runtime-literals.cc',
+        'runtime/runtime-liveedit.cc',
+        'runtime/runtime-maths.cc',
+        'runtime/runtime-numbers.cc',
+        'runtime/runtime-object.cc',
+        'runtime/runtime-operators.cc',
+        'runtime/runtime-proxy.cc',
+        'runtime/runtime-regexp.cc',
+        'runtime/runtime-scopes.cc',
+        'runtime/runtime-simd.cc',
+        'runtime/runtime-strings.cc',
+        'runtime/runtime-symbol.cc',
+        'runtime/runtime-test.cc',
+        'runtime/runtime-typedarray.cc',
+        'runtime/runtime-uri.cc',
+        'runtime/runtime-utils.h',
+        'runtime/runtime.cc',
+        'runtime/runtime.h',
+        'safepoint-table.cc',
+        'safepoint-table.h',
+        'signature.h',
+        'simulator.h',
+        'small-pointer-list.h',
+        'snapshot/code-serializer.cc',
+        'snapshot/code-serializer.h',
+        'snapshot/deserializer.cc',
+        'snapshot/deserializer.h',
+        'snapshot/natives.h',
+        'snapshot/natives-common.cc',
+        'snapshot/partial-serializer.cc',
+        'snapshot/partial-serializer.h',
+        'snapshot/serializer.cc',
+        'snapshot/serializer.h',
+        'snapshot/serializer-common.cc',
+        'snapshot/serializer-common.h',
+        'snapshot/snapshot.h',
+        'snapshot/snapshot-common.cc',
+        'snapshot/snapshot-source-sink.cc',
+        'snapshot/snapshot-source-sink.h',
+        'snapshot/startup-serializer.cc',
+        'snapshot/startup-serializer.h',
+        'source-position.h',
+        'splay-tree.h',
+        'splay-tree-inl.h',
+        'startup-data-util.cc',
+        'startup-data-util.h',
+        'string-builder.cc',
+        'string-builder.h',
+        'string-search.h',
+        'string-stream.cc',
+        'string-stream.h',
+        'strtod.cc',
+        'strtod.h',
+        'tracing/trace-event.cc',
+        'tracing/trace-event.h',
+        'transitions-inl.h',
+        'transitions.cc',
+        'transitions.h',
+        'type-cache.cc',
+        'type-cache.h',
+        'type-feedback-vector-inl.h',
+        'type-feedback-vector.cc',
+        'type-feedback-vector.h',
+        'type-info.cc',
+        'type-info.h',
+        'types.cc',
+        'types.h',
+        'typing-asm.cc',
+        'typing-asm.h',
+        'unicode-inl.h',
+        'unicode.cc',
+        'unicode.h',
+        'unicode-cache-inl.h',
+        'unicode-cache.h',
+        'unicode-decoder.cc',
+        'unicode-decoder.h',
+        'uri.cc',
+        'uri.h',
+        'utils-inl.h',
+        'utils.cc',
+        'utils.h',
+        'v8.cc',
+        'v8.h',
+        'v8memory.h',
+        'v8threads.cc',
+        'v8threads.h',
+        'vector.h',
+        'version.cc',
+        'version.h',
+        'vm-state-inl.h',
+        'vm-state.h',
+        'wasm/asm-wasm-builder.cc',
+        'wasm/asm-wasm-builder.h',
+        'wasm/ast-decoder.cc',
+        'wasm/ast-decoder.h',
+        'wasm/decoder.h',
+        'wasm/encoder.cc',
+        'wasm/encoder.h',
+        'wasm/leb-helper.h',
+        'wasm/module-decoder.cc',
+        'wasm/module-decoder.h',
+        'wasm/switch-logic.cc',
+        'wasm/switch-logic.h',
+        'wasm/wasm-external-refs.cc',
+        'wasm/wasm-external-refs.h',
+        'wasm/wasm-function-name-table.cc',
+        'wasm/wasm-function-name-table.h',
+        'wasm/wasm-js.cc',
+        'wasm/wasm-js.h',
+        'wasm/wasm-macro-gen.h',
+        'wasm/wasm-module.cc',
+        'wasm/wasm-module.h',
+        'wasm/wasm-opcodes.cc',
+        'wasm/wasm-opcodes.h',
+        'wasm/wasm-result.cc',
+        'wasm/wasm-result.h',
+        'zone.cc',
+        'zone.h',
+        'zone-allocator.h',
+        'zone-containers.h',
+        'third_party/fdlibm/fdlibm.cc',
+        'third_party/fdlibm/fdlibm.h',
+      ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+        ['v8_target_arch=="arm"', {
+          'sources': [  ### gcmole(arch:arm) ###
+            'arm/assembler-arm-inl.h',
+            'arm/assembler-arm.cc',
+            'arm/assembler-arm.h',
+            'arm/builtins-arm.cc',
+            'arm/code-stubs-arm.cc',
+            'arm/code-stubs-arm.h',
+            'arm/codegen-arm.cc',
+            'arm/codegen-arm.h',
+            'arm/constants-arm.cc',
+            'arm/constants-arm.h',
+            'arm/cpu-arm.cc',
+            'arm/deoptimizer-arm.cc',
+            'arm/disasm-arm.cc',
+            'arm/frames-arm.cc',
+            'arm/frames-arm.h',
+            'arm/interface-descriptors-arm.cc',
+            'arm/interface-descriptors-arm.h',
+            'arm/macro-assembler-arm.cc',
+            'arm/macro-assembler-arm.h',
+            'arm/simulator-arm.cc',
+            'arm/simulator-arm.h',
+            'compiler/arm/code-generator-arm.cc',
+            'compiler/arm/instruction-codes-arm.h',
+            'compiler/arm/instruction-scheduler-arm.cc',
+            'compiler/arm/instruction-selector-arm.cc',
+            'crankshaft/arm/lithium-arm.cc',
+            'crankshaft/arm/lithium-arm.h',
+            'crankshaft/arm/lithium-codegen-arm.cc',
+            'crankshaft/arm/lithium-codegen-arm.h',
+            'crankshaft/arm/lithium-gap-resolver-arm.cc',
+            'crankshaft/arm/lithium-gap-resolver-arm.h',
+            'debug/arm/debug-arm.cc',
+            'full-codegen/arm/full-codegen-arm.cc',
+            'ic/arm/access-compiler-arm.cc',
+            'ic/arm/handler-compiler-arm.cc',
+            'ic/arm/ic-arm.cc',
+            'ic/arm/ic-compiler-arm.cc',
+            'ic/arm/stub-cache-arm.cc',
+            'regexp/arm/regexp-macro-assembler-arm.cc',
+            'regexp/arm/regexp-macro-assembler-arm.h',
+          ],
+        }],
+        ['v8_target_arch=="arm64"', {
+          'sources': [  ### gcmole(arch:arm64) ###
+            'arm64/assembler-arm64.cc',
+            'arm64/assembler-arm64.h',
+            'arm64/assembler-arm64-inl.h',
+            'arm64/builtins-arm64.cc',
+            'arm64/codegen-arm64.cc',
+            'arm64/codegen-arm64.h',
+            'arm64/code-stubs-arm64.cc',
+            'arm64/code-stubs-arm64.h',
+            'arm64/constants-arm64.h',
+            'arm64/cpu-arm64.cc',
+            'arm64/decoder-arm64.cc',
+            'arm64/decoder-arm64.h',
+            'arm64/decoder-arm64-inl.h',
+            'arm64/deoptimizer-arm64.cc',
+            'arm64/disasm-arm64.cc',
+            'arm64/disasm-arm64.h',
+            'arm64/frames-arm64.cc',
+            'arm64/frames-arm64.h',
+            'arm64/instructions-arm64.cc',
+            'arm64/instructions-arm64.h',
+            'arm64/instrument-arm64.cc',
+            'arm64/instrument-arm64.h',
+            'arm64/interface-descriptors-arm64.cc',
+            'arm64/interface-descriptors-arm64.h',
+            'arm64/macro-assembler-arm64.cc',
+            'arm64/macro-assembler-arm64.h',
+            'arm64/macro-assembler-arm64-inl.h',
+            'arm64/simulator-arm64.cc',
+            'arm64/simulator-arm64.h',
+            'arm64/utils-arm64.cc',
+            'arm64/utils-arm64.h',
+            'compiler/arm64/code-generator-arm64.cc',
+            'compiler/arm64/instruction-codes-arm64.h',
+            'compiler/arm64/instruction-scheduler-arm64.cc',
+            'compiler/arm64/instruction-selector-arm64.cc',
+            'crankshaft/arm64/delayed-masm-arm64.cc',
+            'crankshaft/arm64/delayed-masm-arm64.h',
+            'crankshaft/arm64/delayed-masm-arm64-inl.h',
+            'crankshaft/arm64/lithium-arm64.cc',
+            'crankshaft/arm64/lithium-arm64.h',
+            'crankshaft/arm64/lithium-codegen-arm64.cc',
+            'crankshaft/arm64/lithium-codegen-arm64.h',
+            'crankshaft/arm64/lithium-gap-resolver-arm64.cc',
+            'crankshaft/arm64/lithium-gap-resolver-arm64.h',
+            'debug/arm64/debug-arm64.cc',
+            'full-codegen/arm64/full-codegen-arm64.cc',
+            'ic/arm64/access-compiler-arm64.cc',
+            'ic/arm64/handler-compiler-arm64.cc',
+            'ic/arm64/ic-arm64.cc',
+            'ic/arm64/ic-compiler-arm64.cc',
+            'ic/arm64/stub-cache-arm64.cc',
+            'regexp/arm64/regexp-macro-assembler-arm64.cc',
+            'regexp/arm64/regexp-macro-assembler-arm64.h',
+          ],
+        }],
+        ['v8_target_arch=="ia32"', {
+          'sources': [  ### gcmole(arch:ia32) ###
+            'ia32/assembler-ia32-inl.h',
+            'ia32/assembler-ia32.cc',
+            'ia32/assembler-ia32.h',
+            'ia32/builtins-ia32.cc',
+            'ia32/code-stubs-ia32.cc',
+            'ia32/code-stubs-ia32.h',
+            'ia32/codegen-ia32.cc',
+            'ia32/codegen-ia32.h',
+            'ia32/cpu-ia32.cc',
+            'ia32/deoptimizer-ia32.cc',
+            'ia32/disasm-ia32.cc',
+            'ia32/frames-ia32.cc',
+            'ia32/frames-ia32.h',
+            'ia32/interface-descriptors-ia32.cc',
+            'ia32/macro-assembler-ia32.cc',
+            'ia32/macro-assembler-ia32.h',
+            'compiler/ia32/code-generator-ia32.cc',
+            'compiler/ia32/instruction-codes-ia32.h',
+            'compiler/ia32/instruction-scheduler-ia32.cc',
+            'compiler/ia32/instruction-selector-ia32.cc',
+            'crankshaft/ia32/lithium-codegen-ia32.cc',
+            'crankshaft/ia32/lithium-codegen-ia32.h',
+            'crankshaft/ia32/lithium-gap-resolver-ia32.cc',
+            'crankshaft/ia32/lithium-gap-resolver-ia32.h',
+            'crankshaft/ia32/lithium-ia32.cc',
+            'crankshaft/ia32/lithium-ia32.h',
+            'debug/ia32/debug-ia32.cc',
+            'full-codegen/ia32/full-codegen-ia32.cc',
+            'ic/ia32/access-compiler-ia32.cc',
+            'ic/ia32/handler-compiler-ia32.cc',
+            'ic/ia32/ic-ia32.cc',
+            'ic/ia32/ic-compiler-ia32.cc',
+            'ic/ia32/stub-cache-ia32.cc',
+            'regexp/ia32/regexp-macro-assembler-ia32.cc',
+            'regexp/ia32/regexp-macro-assembler-ia32.h',
+          ],
+        }],
+        ['v8_target_arch=="x87"', {
+          'sources': [  ### gcmole(arch:x87) ###
+            'x87/assembler-x87-inl.h',
+            'x87/assembler-x87.cc',
+            'x87/assembler-x87.h',
+            'x87/builtins-x87.cc',
+            'x87/code-stubs-x87.cc',
+            'x87/code-stubs-x87.h',
+            'x87/codegen-x87.cc',
+            'x87/codegen-x87.h',
+            'x87/cpu-x87.cc',
+            'x87/deoptimizer-x87.cc',
+            'x87/disasm-x87.cc',
+            'x87/frames-x87.cc',
+            'x87/frames-x87.h',
+            'x87/interface-descriptors-x87.cc',
+            'x87/macro-assembler-x87.cc',
+            'x87/macro-assembler-x87.h',
+            'compiler/x87/code-generator-x87.cc',
+            'compiler/x87/instruction-codes-x87.h',
+            'compiler/x87/instruction-scheduler-x87.cc',
+            'compiler/x87/instruction-selector-x87.cc',
+            'crankshaft/x87/lithium-codegen-x87.cc',
+            'crankshaft/x87/lithium-codegen-x87.h',
+            'crankshaft/x87/lithium-gap-resolver-x87.cc',
+            'crankshaft/x87/lithium-gap-resolver-x87.h',
+            'crankshaft/x87/lithium-x87.cc',
+            'crankshaft/x87/lithium-x87.h',
+            'debug/x87/debug-x87.cc',
+            'full-codegen/x87/full-codegen-x87.cc',
+            'ic/x87/access-compiler-x87.cc',
+            'ic/x87/handler-compiler-x87.cc',
+            'ic/x87/ic-x87.cc',
+            'ic/x87/ic-compiler-x87.cc',
+            'ic/x87/stub-cache-x87.cc',
+            'regexp/x87/regexp-macro-assembler-x87.cc',
+            'regexp/x87/regexp-macro-assembler-x87.h',
+          ],
+        }],
+        ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
+          'sources': [  ### gcmole(arch:mipsel) ###
+            'mips/assembler-mips.cc',
+            'mips/assembler-mips.h',
+            'mips/assembler-mips-inl.h',
+            'mips/builtins-mips.cc',
+            'mips/codegen-mips.cc',
+            'mips/codegen-mips.h',
+            'mips/code-stubs-mips.cc',
+            'mips/code-stubs-mips.h',
+            'mips/constants-mips.cc',
+            'mips/constants-mips.h',
+            'mips/cpu-mips.cc',
+            'mips/deoptimizer-mips.cc',
+            'mips/disasm-mips.cc',
+            'mips/frames-mips.cc',
+            'mips/frames-mips.h',
+            'mips/interface-descriptors-mips.cc',
+            'mips/macro-assembler-mips.cc',
+            'mips/macro-assembler-mips.h',
+            'mips/simulator-mips.cc',
+            'mips/simulator-mips.h',
+            'compiler/mips/code-generator-mips.cc',
+            'compiler/mips/instruction-codes-mips.h',
+            'compiler/mips/instruction-scheduler-mips.cc',
+            'compiler/mips/instruction-selector-mips.cc',
+            'crankshaft/mips/lithium-codegen-mips.cc',
+            'crankshaft/mips/lithium-codegen-mips.h',
+            'crankshaft/mips/lithium-gap-resolver-mips.cc',
+            'crankshaft/mips/lithium-gap-resolver-mips.h',
+            'crankshaft/mips/lithium-mips.cc',
+            'crankshaft/mips/lithium-mips.h',
+            'debug/mips/debug-mips.cc',
+            'full-codegen/mips/full-codegen-mips.cc',
+            'ic/mips/access-compiler-mips.cc',
+            'ic/mips/handler-compiler-mips.cc',
+            'ic/mips/ic-mips.cc',
+            'ic/mips/ic-compiler-mips.cc',
+            'ic/mips/stub-cache-mips.cc',
+            'regexp/mips/regexp-macro-assembler-mips.cc',
+            'regexp/mips/regexp-macro-assembler-mips.h',
+          ],
+        }],
+        ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
+          'sources': [  ### gcmole(arch:mips64el) ###
+            'mips64/assembler-mips64.cc',
+            'mips64/assembler-mips64.h',
+            'mips64/assembler-mips64-inl.h',
+            'mips64/builtins-mips64.cc',
+            'mips64/codegen-mips64.cc',
+            'mips64/codegen-mips64.h',
+            'mips64/code-stubs-mips64.cc',
+            'mips64/code-stubs-mips64.h',
+            'mips64/constants-mips64.cc',
+            'mips64/constants-mips64.h',
+            'mips64/cpu-mips64.cc',
+            'mips64/deoptimizer-mips64.cc',
+            'mips64/disasm-mips64.cc',
+            'mips64/frames-mips64.cc',
+            'mips64/frames-mips64.h',
+            'mips64/interface-descriptors-mips64.cc',
+            'mips64/macro-assembler-mips64.cc',
+            'mips64/macro-assembler-mips64.h',
+            'mips64/simulator-mips64.cc',
+            'mips64/simulator-mips64.h',
+            'compiler/mips64/code-generator-mips64.cc',
+            'compiler/mips64/instruction-codes-mips64.h',
+            'compiler/mips64/instruction-scheduler-mips64.cc',
+            'compiler/mips64/instruction-selector-mips64.cc',
+            'crankshaft/mips64/lithium-codegen-mips64.cc',
+            'crankshaft/mips64/lithium-codegen-mips64.h',
+            'crankshaft/mips64/lithium-gap-resolver-mips64.cc',
+            'crankshaft/mips64/lithium-gap-resolver-mips64.h',
+            'crankshaft/mips64/lithium-mips64.cc',
+            'crankshaft/mips64/lithium-mips64.h',
+            'debug/mips64/debug-mips64.cc',
+            'full-codegen/mips64/full-codegen-mips64.cc',
+            'ic/mips64/access-compiler-mips64.cc',
+            'ic/mips64/handler-compiler-mips64.cc',
+            'ic/mips64/ic-mips64.cc',
+            'ic/mips64/ic-compiler-mips64.cc',
+            'ic/mips64/stub-cache-mips64.cc',
+            'regexp/mips64/regexp-macro-assembler-mips64.cc',
+            'regexp/mips64/regexp-macro-assembler-mips64.h',
+          ],
+        }],
+        ['v8_target_arch=="x64" or v8_target_arch=="x32"', {
+          'sources': [  ### gcmole(arch:x64) ###
+            'crankshaft/x64/lithium-codegen-x64.cc',
+            'crankshaft/x64/lithium-codegen-x64.h',
+            'crankshaft/x64/lithium-gap-resolver-x64.cc',
+            'crankshaft/x64/lithium-gap-resolver-x64.h',
+            'crankshaft/x64/lithium-x64.cc',
+            'crankshaft/x64/lithium-x64.h',
+            'x64/assembler-x64-inl.h',
+            'x64/assembler-x64.cc',
+            'x64/assembler-x64.h',
+            'x64/builtins-x64.cc',
+            'x64/code-stubs-x64.cc',
+            'x64/code-stubs-x64.h',
+            'x64/codegen-x64.cc',
+            'x64/codegen-x64.h',
+            'x64/cpu-x64.cc',
+            'x64/deoptimizer-x64.cc',
+            'x64/disasm-x64.cc',
+            'x64/frames-x64.cc',
+            'x64/frames-x64.h',
+            'x64/interface-descriptors-x64.cc',
+            'x64/macro-assembler-x64.cc',
+            'x64/macro-assembler-x64.h',
+            'debug/x64/debug-x64.cc',
+            'full-codegen/x64/full-codegen-x64.cc',
+            'ic/x64/access-compiler-x64.cc',
+            'ic/x64/handler-compiler-x64.cc',
+            'ic/x64/ic-x64.cc',
+            'ic/x64/ic-compiler-x64.cc',
+            'ic/x64/stub-cache-x64.cc',
+            'regexp/x64/regexp-macro-assembler-x64.cc',
+            'regexp/x64/regexp-macro-assembler-x64.h',
+          ],
+        }],
+        ['v8_target_arch=="x64"', {
+          'sources': [
+            'compiler/x64/code-generator-x64.cc',
+            'compiler/x64/instruction-codes-x64.h',
+            'compiler/x64/instruction-scheduler-x64.cc',
+            'compiler/x64/instruction-selector-x64.cc',
+          ],
+        }],
+        ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+          'sources': [  ### gcmole(arch:ppc) ###
+            'compiler/ppc/code-generator-ppc.cc',
+            'compiler/ppc/instruction-codes-ppc.h',
+            'compiler/ppc/instruction-scheduler-ppc.cc',
+            'compiler/ppc/instruction-selector-ppc.cc',
+            'crankshaft/ppc/lithium-ppc.cc',
+            'crankshaft/ppc/lithium-ppc.h',
+            'crankshaft/ppc/lithium-codegen-ppc.cc',
+            'crankshaft/ppc/lithium-codegen-ppc.h',
+            'crankshaft/ppc/lithium-gap-resolver-ppc.cc',
+            'crankshaft/ppc/lithium-gap-resolver-ppc.h',
+            'debug/ppc/debug-ppc.cc',
+            'full-codegen/ppc/full-codegen-ppc.cc',
+            'ic/ppc/access-compiler-ppc.cc',
+            'ic/ppc/handler-compiler-ppc.cc',
+            'ic/ppc/ic-ppc.cc',
+            'ic/ppc/ic-compiler-ppc.cc',
+            'ic/ppc/stub-cache-ppc.cc',
+            'ppc/assembler-ppc-inl.h',
+            'ppc/assembler-ppc.cc',
+            'ppc/assembler-ppc.h',
+            'ppc/builtins-ppc.cc',
+            'ppc/code-stubs-ppc.cc',
+            'ppc/code-stubs-ppc.h',
+            'ppc/codegen-ppc.cc',
+            'ppc/codegen-ppc.h',
+            'ppc/constants-ppc.cc',
+            'ppc/constants-ppc.h',
+            'ppc/cpu-ppc.cc',
+            'ppc/deoptimizer-ppc.cc',
+            'ppc/disasm-ppc.cc',
+            'ppc/frames-ppc.cc',
+            'ppc/frames-ppc.h',
+            'ppc/interface-descriptors-ppc.cc',
+            'ppc/macro-assembler-ppc.cc',
+            'ppc/macro-assembler-ppc.h',
+            'ppc/simulator-ppc.cc',
+            'ppc/simulator-ppc.h',
+            'regexp/ppc/regexp-macro-assembler-ppc.cc',
+            'regexp/ppc/regexp-macro-assembler-ppc.h',
+          ],
+        }],
+        ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+          'sources': [  ### gcmole(arch:s390) ###
+            'compiler/s390/code-generator-s390.cc',
+            'compiler/s390/instruction-codes-s390.h',
+            'compiler/s390/instruction-scheduler-s390.cc',
+            'compiler/s390/instruction-selector-s390.cc',
+            'crankshaft/s390/lithium-codegen-s390.cc',
+            'crankshaft/s390/lithium-codegen-s390.h',
+            'crankshaft/s390/lithium-gap-resolver-s390.cc',
+            'crankshaft/s390/lithium-gap-resolver-s390.h',
+            'crankshaft/s390/lithium-s390.cc',
+            'crankshaft/s390/lithium-s390.h',
+            'debug/s390/debug-s390.cc',
+            'full-codegen/s390/full-codegen-s390.cc',
+            'ic/s390/access-compiler-s390.cc',
+            'ic/s390/handler-compiler-s390.cc',
+            'ic/s390/ic-compiler-s390.cc',
+            'ic/s390/ic-s390.cc',
+            'ic/s390/stub-cache-s390.cc',
+            'regexp/s390/regexp-macro-assembler-s390.cc',
+            'regexp/s390/regexp-macro-assembler-s390.h',
+            's390/assembler-s390.cc',
+            's390/assembler-s390.h',
+            's390/assembler-s390-inl.h',
+            's390/builtins-s390.cc',
+            's390/codegen-s390.cc',
+            's390/codegen-s390.h',
+            's390/code-stubs-s390.cc',
+            's390/code-stubs-s390.h',
+            's390/constants-s390.cc',
+            's390/constants-s390.h',
+            's390/cpu-s390.cc',
+            's390/deoptimizer-s390.cc',
+            's390/disasm-s390.cc',
+            's390/frames-s390.cc',
+            's390/frames-s390.h',
+            's390/interface-descriptors-s390.cc',
+            's390/macro-assembler-s390.cc',
+            's390/macro-assembler-s390.h',
+            's390/simulator-s390.cc',
+            's390/simulator-s390.h',
+          ],
+        }],
+        ['OS=="win"', {
+          'variables': {
+            'gyp_generators': '<!(echo $GYP_GENERATORS)',
+          },
+          'msvs_disabled_warnings': [4351, 4355, 4800],
+          # When building Official, the .lib is too large and exceeds the 2G
+          # limit. Sharding breaks it into multiple pieces to stay under the
+          # limit. See http://crbug.com/485155.
+          'msvs_shard': 4,
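+          # (With a shard count of 4, gyp's MSVS generator is expected to
+          # emit four pieces named v8_base_0 through v8_base_3.)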
+        }],
+        ['component=="shared_library"', {
+          'defines': [
+            'BUILDING_V8_SHARED',
+            'V8_SHARED',
+          ],
+        }],
+        ['v8_postmortem_support=="true"', {
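+          # debug-support.cc lives in SHARED_INTERMEDIATE_DIR, i.e. it is a
+          # generated file; the rule that produces it is outside this hunk.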
+          'sources': [
+            '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+          ]
+        }],
+        ['v8_enable_i18n_support==1', {
+          'dependencies': [
+            '<(icu_gyp_path):icui18n',
+            '<(icu_gyp_path):icuuc',
+          ],
+          'conditions': [
+            ['icu_use_data_file_flag==1', {
+              'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
+            }, {  # else icu_use_data_file_flag != 1
+              'conditions': [
+                ['OS=="win"', {
+                  'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
+                }, {
+                  'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
+                }],
+              ],
+            }],
+          ],
+        }, {  # v8_enable_i18n_support==0
+          'sources!': [
+            'i18n.cc',
+            'i18n.h',
+          ],
+        }],
+        ['OS=="win" and v8_enable_i18n_support==1', {
+          'dependencies': [
+            '<(icu_gyp_path):icudata',
+          ],
+        }],
+      ],
+    },
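+    # v8_base's support library: OS/platform abstractions and generic
+    # utilities under base/ with no dependencies on the rest of V8.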
+    {
+      'target_name': 'v8_libbase',
+      'type': 'static_library',
+      'variables': {
+        'optimize': 'max',
+      },
+      'include_dirs+': [
+        '..',
+      ],
+      'sources': [
+        'base/accounting-allocator.cc',
+        'base/accounting-allocator.h',
+        'base/adapters.h',
+        'base/atomic-utils.h',
+        'base/atomicops.h',
+        'base/atomicops_internals_arm64_gcc.h',
+        'base/atomicops_internals_arm_gcc.h',
+        'base/atomicops_internals_atomicword_compat.h',
+        'base/atomicops_internals_mac.h',
+        'base/atomicops_internals_mips_gcc.h',
+        'base/atomicops_internals_mips64_gcc.h',
+        'base/atomicops_internals_portable.h',
+        'base/atomicops_internals_ppc_gcc.h',
+        'base/atomicops_internals_s390_gcc.h',
+        'base/atomicops_internals_tsan.h',
+        'base/atomicops_internals_x86_gcc.cc',
+        'base/atomicops_internals_x86_gcc.h',
+        'base/atomicops_internals_x86_msvc.h',
+        'base/bits.cc',
+        'base/bits.h',
+        'base/build_config.h',
+        'base/compiler-specific.h',
+        'base/cpu.cc',
+        'base/cpu.h',
+        'base/division-by-constant.cc',
+        'base/division-by-constant.h',
+        'base/flags.h',
+        'base/format-macros.h',
+        'base/functional.cc',
+        'base/functional.h',
+        'base/iterator.h',
+        'base/lazy-instance.h',
+        'base/logging.cc',
+        'base/logging.h',
+        'base/macros.h',
+        'base/once.cc',
+        'base/once.h',
+        'base/platform/condition-variable.cc',
+        'base/platform/condition-variable.h',
+        'base/platform/elapsed-timer.h',
+        'base/platform/mutex.cc',
+        'base/platform/mutex.h',
+        'base/platform/platform.h',
+        'base/platform/semaphore.cc',
+        'base/platform/semaphore.h',
+        'base/platform/time.cc',
+        'base/platform/time.h',
+        'base/safe_conversions.h',
+        'base/safe_conversions_impl.h',
+        'base/safe_math.h',
+        'base/safe_math_impl.h',
+        'base/smart-pointers.h',
+        'base/sys-info.cc',
+        'base/sys-info.h',
+        'base/utils/random-number-generator.cc',
+        'base/utils/random-number-generator.h',
+      ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+        ['OS=="linux"', {
+            'conditions': [
+              ['nacl_target_arch=="none"', {
+                'link_settings': {
+                  'libraries': [
+                    '-ldl',
+                    '-lrt'
+                  ],
+                },
+              }, {
+                'defines': [
+                  'V8_LIBRT_NOT_AVAILABLE=1',
+                ],
+              }],
+            ],
+            'sources': [
+              'base/platform/platform-linux.cc',
+              'base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="android"', {
+            'sources': [
+              'base/platform/platform-posix.cc'
+            ],
+            'link_settings': {
+              'target_conditions': [
+                ['_toolset=="host" and host_os!="mac"', {
+                  # Only include libdl and librt on host builds: they are
+                  # linked by default on Android target builds, and re-adding
+                  # them here would change the library order and break the
+                  # build (see crbug.com/469973). These libraries do not
+                  # exist on Mac-hosted builds.
+                  'libraries': [
+                    '-ldl',
+                    '-lrt'
+                  ]
+                }]
+              ]
+            },
+            'conditions': [
+              ['host_os=="mac"', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'sources': [
+                      'base/platform/platform-macos.cc'
+                    ]
+                  }, {
+                    'sources': [
+                      'base/platform/platform-linux.cc'
+                    ]
+                  }],
+                ],
+              }, {
+                'sources': [
+                  'base/platform/platform-linux.cc'
+                ]
+              }],
+            ],
+          },
+        ],
+        ['OS=="qnx"', {
+            'link_settings': {
+              'target_conditions': [
+                ['_toolset=="host" and host_os=="linux"', {
+                  'libraries': [
+                    '-lrt'
+                  ],
+                }],
+                ['_toolset=="target"', {
+                  'libraries': [
+                    '-lbacktrace'
+                  ],
+                }],
+              ],
+            },
+            'sources': [
+              'base/platform/platform-posix.cc',
+              'base/qnx-math.h',
+            ],
+            'target_conditions': [
+              ['_toolset=="host" and host_os=="linux"', {
+                'sources': [
+                  'base/platform/platform-linux.cc'
+                ],
+              }],
+              ['_toolset=="host" and host_os=="mac"', {
+                'sources': [
+                  'base/platform/platform-macos.cc'
+                ],
+              }],
+              ['_toolset=="target"', {
+                'sources': [
+                  'base/platform/platform-qnx.cc'
+                ],
+              }],
+            ],
+          },
+        ],
+        ['OS=="freebsd"', {
+            'link_settings': {
+              'libraries': [
+                '-L/usr/local/lib -lexecinfo',
+            ]},
+            'sources': [
+              'base/platform/platform-freebsd.cc',
+              'base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="openbsd"', {
+            'link_settings': {
+              'libraries': [
+                '-L/usr/local/lib -lexecinfo',
+            ]},
+            'sources': [
+              'base/platform/platform-openbsd.cc',
+              'base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="netbsd"', {
+            'link_settings': {
+              'libraries': [
+                '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
+            ]},
+            'sources': [
+              'base/platform/platform-openbsd.cc',
+              'base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="aix"', {
+          'sources': [
+            'base/platform/platform-aix.cc',
+            'base/platform/platform-posix.cc'
+          ]},
+        ],
+        ['OS=="solaris"', {
+            'link_settings': {
+              'libraries': [
+                '-lnsl -lrt',
+            ]},
+            'sources': [
+              'base/platform/platform-solaris.cc',
+              'base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="mac"', {
+          'sources': [
+            'base/platform/platform-macos.cc',
+            'base/platform/platform-posix.cc'
+          ]},
+        ],
+        ['OS=="win"', {
+          'defines': [
+            '_CRT_RAND_S'  # for rand_s()
+          ],
+          'variables': {
+            'gyp_generators': '<!(echo $GYP_GENERATORS)',
+          },
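+          # <!(...) expands to the command's output at gyp time; the value is
+          # used below to decide whether a Cygwin environment must be probed.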
+          'conditions': [
+            ['gyp_generators=="make"', {
+              'variables': {
+                'build_env': '<!(uname -o)',
+              },
+              'conditions': [
+                ['build_env=="Cygwin"', {
+                  'sources': [
+                    'base/platform/platform-cygwin.cc',
+                    'base/platform/platform-posix.cc'
+                  ],
+                }, {
+                  'sources': [
+                    'base/platform/platform-win32.cc',
+                    'base/win32-headers.h',
+                  ],
+                }],
+              ],
+              'link_settings':  {
+                'libraries': [ '-lwinmm', '-lws2_32' ],
+              },
+            }, {
+              'sources': [
+                'base/platform/platform-win32.cc',
+                'base/win32-headers.h',
+              ],
+              'msvs_disabled_warnings': [4351, 4355, 4800],
+              'link_settings':  {
+                'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
+              },
+            }],
+          ],
+        }],
+      ],
+    },
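+    # A default v8::Platform implementation (task queue plus worker threads)
+    # exposed through the ../include/libplatform/libplatform.h API.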
+    {
+      'target_name': 'v8_libplatform',
+      'type': 'static_library',
+      'variables': {
+        'optimize': 'max',
+      },
+      'dependencies': [
+        'v8_libbase',
+      ],
+      'include_dirs+': [
+        '..',
+        '../include',
+      ],
+      'sources': [
+        '../include/libplatform/libplatform.h',
+        'libplatform/default-platform.cc',
+        'libplatform/default-platform.h',
+        'libplatform/task-queue.cc',
+        'libplatform/task-queue.h',
+        'libplatform/worker-thread.cc',
+        'libplatform/worker-thread.h',
+      ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '../include',
+        ],
+      },
+    },
+    {
+      'target_name': 'natives_blob',
+      'type': 'none',
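+      # When external startup data is in use, concatenate the four js2c
+      # .bin outputs listed below into a single natives_blob.bin.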
+      'conditions': [
+        [ 'v8_use_external_startup_data==1', {
+          'conditions': [
+            ['want_separate_host_toolset==1', {
+              'dependencies': ['js2c#host'],
+            }, {
+              'dependencies': ['js2c'],
+            }],
+          ],
+          'actions': [{
+            'action_name': 'concatenate_natives_blob',
+            'inputs': [
+              '../tools/concatenate-files.py',
+              '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+              '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
+              '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+              '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
+            ],
+            'conditions': [
+              ['want_separate_host_toolset==1', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'outputs': [
+                      '<(PRODUCT_DIR)/natives_blob_host.bin',
+                    ],
+                    'action': [
+                      'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
+                    ],
+                  }, {
+                    'outputs': [
+                      '<(PRODUCT_DIR)/natives_blob.bin',
+                    ],
+                    'action': [
+                      'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+                    ],
+                  }],
+                ],
+              }, {
+                'outputs': [
+                  '<(PRODUCT_DIR)/natives_blob.bin',
+                ],
+                'action': [
+                  'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+                ],
+              }],
+            ],
+          }],
+        }],
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ]
+    },
+    {
+      'target_name': 'js2c',
+      'type': 'none',
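+      # Drives tools/js2c.py, which embeds the JS library sources into
+      # generated .cc files and (with --startup_blob --nojs) into the .bin
+      # blobs consumed by natives_blob.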
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ],
+      'variables': {
+        'library_files': [
+          'js/macros.py',
+          'messages.h',
+          'js/prologue.js',
+          'js/runtime.js',
+          'js/v8natives.js',
+          'js/symbol.js',
+          'js/array.js',
+          'js/string.js',
+          'js/uri.js',
+          'js/math.js',
+          'third_party/fdlibm/fdlibm.js',
+          'js/regexp.js',
+          'js/arraybuffer.js',
+          'js/typedarray.js',
+          'js/iterator-prototype.js',
+          'js/collection.js',
+          'js/weak-collection.js',
+          'js/collection-iterator.js',
+          'js/promise.js',
+          'js/messages.js',
+          'js/json.js',
+          'js/array-iterator.js',
+          'js/string-iterator.js',
+          'js/templates.js',
+          'js/spread.js',
+          'js/proxy.js',
+          'debug/mirrors.js',
+          'debug/debug.js',
+          'debug/liveedit.js',
+        ],
+        'experimental_library_files': [
+          'js/macros.py',
+          'messages.h',
+          'js/harmony-atomics.js',
+          'js/harmony-regexp-exec.js',
+          'js/harmony-sharedarraybuffer.js',
+          'js/harmony-simd.js',
+          'js/harmony-species.js',
+          'js/harmony-unicode-regexps.js',
+          'js/harmony-string-padding.js',
+          'js/promise-extra.js',
+          'js/harmony-async-await.js',
+        ],
+        'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+        'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
+        'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+        'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
+        'conditions': [
+          ['v8_enable_i18n_support==1', {
+            'library_files': ['js/i18n.js'],
+            'experimental_library_files': [
+              'js/icu-case-mapping.js',
+              'js/intl-extra.js',
+            ],
+          }],
+        ],
+      },
+      'actions': [
+        {
+          'action_name': 'js2c',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(library_files)',
+          ],
+          'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+            'CORE',
+            '<@(library_files)',
+          ],
+        },
+        {
+          'action_name': 'js2c_bin',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(library_files)',
+          ],
+          'outputs': ['<@(libraries_bin_file)'],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+            'CORE',
+            '<@(library_files)',
+            '--startup_blob', '<@(libraries_bin_file)',
+            '--nojs',
+          ],
+        },
+        {
+          'action_name': 'js2c_experimental',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(experimental_library_files)',
+          ],
+          'outputs': ['<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc'],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+            'EXPERIMENTAL',
+            '<@(experimental_library_files)',
+          ],
+        },
+        {
+          'action_name': 'js2c_experimental_bin',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(experimental_library_files)',
+          ],
+          'outputs': ['<@(libraries_experimental_bin_file)'],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+            'EXPERIMENTAL',
+            '<@(experimental_library_files)',
+            '--startup_blob', '<@(libraries_experimental_bin_file)',
+            '--nojs',
+          ],
+        },
+        {
+          'action_name': 'js2c_extras',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(v8_extra_library_files)',
+          ],
+          'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+            'EXTRAS',
+            '<@(v8_extra_library_files)',
+          ],
+        },
+        {
+          'action_name': 'js2c_extras_bin',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(v8_extra_library_files)',
+          ],
+          'outputs': ['<@(libraries_extras_bin_file)'],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+            'EXTRAS',
+            '<@(v8_extra_library_files)',
+            '--startup_blob', '<@(libraries_extras_bin_file)',
+            '--nojs',
+          ],
+        },
+        {
+          'action_name': 'js2c_experimental_extras',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(v8_experimental_extra_library_files)',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+          ],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+            'EXPERIMENTAL_EXTRAS',
+            '<@(v8_experimental_extra_library_files)',
+          ],
+        },
+        {
+          'action_name': 'js2c_experimental_extras_bin',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(v8_experimental_extra_library_files)',
+          ],
+          'outputs': ['<@(libraries_experimental_extras_bin_file)'],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+            'EXPERIMENTAL_EXTRAS',
+            '<@(v8_experimental_extra_library_files)',
+            '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
+            '--nojs',
+          ],
+        },
+      ],
+    },
+    {
+      'target_name': 'postmortem-metadata',
+      'type': 'none',
+      'variables': {
+        'heapobject_files': [
+          'objects.h',
+          'objects-inl.h',
+        ],
+      },
+      'actions': [
+        {
+          'action_name': 'gen-postmortem-metadata',
+          'inputs': [
+            '../tools/gen-postmortem-metadata.py',
+            '<@(heapobject_files)',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+          ],
+          'action': [
+            'python',
+            '../tools/gen-postmortem-metadata.py',
+            '<@(_outputs)',
+            '<@(heapobject_files)',
+          ],
+        },
+      ],
+    },
+    {
+      'target_name': 'mksnapshot',
+      'type': 'executable',
+      'dependencies': ['v8_base', 'v8_nosnapshot', 'v8_libplatform'],
+      'include_dirs+': [
+        '..',
+      ],
+      'sources': [
+        'snapshot/mksnapshot.cc',
+      ],
+      'conditions': [
+        ['v8_enable_i18n_support==1', {
+          'dependencies': [
+            '<(icu_gyp_path):icui18n',
+            '<(icu_gyp_path):icuuc',
+          ]
+        }],
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ],
+    },
+  ],
+}
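
Note: the concatenate_natives_blob action above joins the four generated
startup .bin files into a single blob, byte for byte, in the order listed in
its gyp inputs. A minimal C++ sketch of the equivalent logic (file names taken
from the gyp inputs above; the real build step runs
../tools/concatenate-files.py):

    #include <fstream>

    // Sketch only: append each generated library blob, in gyp input order,
    // into one natives blob.
    int main() {
      const char* inputs[] = {
          "libraries.bin", "libraries-experimental.bin", "libraries-extras.bin",
          "libraries-experimental-extras.bin"};
      std::ofstream out("natives_blob.bin", std::ios::binary);
      for (const char* name : inputs) {
        std::ifstream in(name, std::ios::binary);
        out << in.rdbuf();  // raw byte-for-byte append
      }
      return 0;
    }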
diff --git a/src/v8.h b/src/v8.h
index 6016ef1..a1b18b2 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -21,7 +21,7 @@
   // Report process out of memory. Implementation found in api.cc.
   // This function will not return, but will terminate the execution.
   static void FatalProcessOutOfMemory(const char* location,
-                                      bool take_snapshot = false);
+                                      bool is_heap_oom = false);
 
   static void InitializePlatform(v8::Platform* platform);
   static void ShutdownPlatform();
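
Note: the rename changes the bool's meaning from an action (take a snapshot)
to a classification of the failure. A hypothetical call site, for
illustration only (the location string and caller are not from this patch):

    // Illustrative only: report an OOM that originated in the JS heap.
    V8::FatalProcessOutOfMemory("Heap::PerformGarbageCollection",
                                true /* is_heap_oom */);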
diff --git a/src/vector.h b/src/vector.h
index e4637c9..d120dfc 100644
--- a/src/vector.h
+++ b/src/vector.h
@@ -24,6 +24,9 @@
     DCHECK(length == 0 || (length > 0 && data != NULL));
   }
 
+  template <int N>
+  explicit Vector(T (&arr)[N]) : start_(arr), length_(N) {}
+
   static Vector<T> New(int length) {
     return Vector<T>(NewArray<T>(length), length);
   }
@@ -201,6 +204,10 @@
   return Vector<char>(data, (length < max) ? length : max);
 }
 
+template <typename T, int N>
+inline Vector<T> ArrayVector(T (&arr)[N]) {
+  return Vector<T>(arr);
+}
 
 }  // namespace internal
 }  // namespace v8
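
Note: the new array constructor and the ArrayVector helper deduce a Vector's
length from the static type of a C array. A minimal usage sketch (only
Vector/ArrayVector come from this header; the surrounding names are
illustrative):

    // Wrap a fixed-size array without spelling out its length.
    int kPrimes[] = {2, 3, 5, 7};
    v8::internal::Vector<int> primes = v8::internal::ArrayVector(kPrimes);
    DCHECK_EQ(4, primes.length());  // N == 4 deduced at compile time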
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index c8bd4e8..35b69a1 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -63,19 +63,11 @@
   scope_address_ = Simulator::current(isolate)->get_sp();
 #endif
   isolate_->set_external_callback_scope(this);
-  if (FLAG_runtime_call_stats) {
-    RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
-    timer_.Initialize(&stats->ExternalCallback, stats->current_timer());
-    stats->Enter(&timer_);
-  }
   TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
                      "V8.ExternalCallback");
 }
 
 ExternalCallbackScope::~ExternalCallbackScope() {
-  if (FLAG_runtime_call_stats) {
-    isolate_->counters()->runtime_call_stats()->Leave(&timer_);
-  }
   isolate_->set_external_callback_scope(previous_scope_);
   TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
                    "V8.ExternalCallback");
diff --git a/src/vm-state.h b/src/vm-state.h
index 3f8d381..29cbf39 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -49,7 +49,6 @@
   Isolate* isolate_;
   Address callback_;
   ExternalCallbackScope* previous_scope_;
-  RuntimeCallTimer timer_;
 #ifdef USE_SIMULATOR
   Address scope_address_;
 #endif
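
Note: taken together, the two vm-state hunks reduce ExternalCallbackScope to a
plain RAII marker that links itself onto the isolate's scope chain. A
simplified sketch of the resulting shape (tracing and simulator-only members
omitted):

    class ExternalCallbackScope {
     public:
      ExternalCallbackScope(Isolate* isolate, Address callback)
          : isolate_(isolate),
            callback_(callback),
            previous_scope_(isolate->external_callback_scope()) {
        isolate_->set_external_callback_scope(this);  // push onto the chain
      }
      ~ExternalCallbackScope() {
        isolate_->set_external_callback_scope(previous_scope_);  // pop
      }

     private:
      Isolate* isolate_;
      Address callback_;
      ExternalCallbackScope* previous_scope_;
    };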
diff --git a/src/wasm/asm-wasm-builder.cc b/src/wasm/asm-wasm-builder.cc
index d16d3a8..325058c 100644
--- a/src/wasm/asm-wasm-builder.cc
+++ b/src/wasm/asm-wasm-builder.cc
@@ -11,6 +11,7 @@
 #include <math.h>
 
 #include "src/wasm/asm-wasm-builder.h"
+#include "src/wasm/switch-logic.h"
 #include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-opcodes.h"
 
@@ -30,6 +31,7 @@
     if (HasStackOverflow()) return; \
   } while (false)
 
+enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
 
 class AsmWasmBuilderImpl : public AstVisitor {
  public:
@@ -43,9 +45,7 @@
         global_variables_(HashMap::PointersMatch,
                           ZoneHashMap::kDefaultHashMapCapacity,
                           ZoneAllocationPolicy(zone)),
-        in_function_(false),
-        is_set_op_(false),
-        marking_exported(false),
+        scope_(kModuleScope),
         builder_(new (zone) WasmModuleBuilder(zone)),
         current_function_builder_(nullptr),
         literal_(literal),
@@ -55,20 +55,21 @@
         typer_(typer),
         cache_(TypeCache::Get()),
         breakable_blocks_(zone),
-        block_size_(0),
         init_function_index_(0),
         next_table_index_(0),
         function_tables_(HashMap::PointersMatch,
                          ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
-        imported_function_table_(this) {
+        imported_function_table_(this),
+        bounds_(typer->bounds()) {
     InitializeAstVisitor(isolate);
   }
 
   void InitializeInitFunction() {
     init_function_index_ = builder_->AddFunction();
+    FunctionSig::Builder b(zone(), 0, 0);
     current_function_builder_ = builder_->FunctionAt(init_function_index_);
-    current_function_builder_->ReturnType(kAstStmt);
+    current_function_builder_->SetSignature(b.Build());
     builder_->MarkStartFunction(init_function_index_);
     current_function_builder_ = nullptr;
   }
@@ -81,13 +82,13 @@
   void VisitVariableDeclaration(VariableDeclaration* decl) {}
 
   void VisitFunctionDeclaration(FunctionDeclaration* decl) {
-    DCHECK(!in_function_);
+    DCHECK_EQ(kModuleScope, scope_);
     DCHECK_NULL(current_function_builder_);
-    uint16_t index = LookupOrInsertFunction(decl->proxy()->var());
+    uint32_t index = LookupOrInsertFunction(decl->proxy()->var());
     current_function_builder_ = builder_->FunctionAt(index);
-    in_function_ = true;
+    scope_ = kFuncScope;
     RECURSE(Visit(decl->fun()));
-    in_function_ = false;
+    scope_ = kModuleScope;
     current_function_builder_ = nullptr;
     local_variables_.Clear();
   }
@@ -99,6 +100,10 @@
   void VisitStatements(ZoneList<Statement*>* stmts) {
     for (int i = 0; i < stmts->length(); ++i) {
       Statement* stmt = stmts->at(i);
+      ExpressionStatement* e = stmt->AsExpressionStatement();
+      if (e != nullptr && e->expression()->IsUndefinedLiteral()) {
+        continue;
+      }
       RECURSE(Visit(stmt));
       if (stmt->IsJump()) break;
     }
@@ -115,12 +120,10 @@
         }
       }
     }
-    if (in_function_) {
+    if (scope_ == kFuncScope) {
       BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
-                           false,
-                           static_cast<byte>(stmt->statements()->length()));
+                           false);
       RECURSE(VisitStatements(stmt->statements()));
-      DCHECK(block_size_ >= 0);
     } else {
       RECURSE(VisitStatements(stmt->statements()));
     }
@@ -128,25 +131,17 @@
 
   class BlockVisitor {
    private:
-    int prev_block_size_;
-    uint32_t index_;
     AsmWasmBuilderImpl* builder_;
 
    public:
     BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
-                 WasmOpcode opcode, bool is_loop, int initial_block_size)
+                 WasmOpcode opcode, bool is_loop)
         : builder_(builder) {
       builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
       builder_->current_function_builder_->Emit(opcode);
-      index_ =
-          builder_->current_function_builder_->EmitEditableVarIntImmediate();
-      prev_block_size_ = builder_->block_size_;
-      builder_->block_size_ = initial_block_size;
     }
     ~BlockVisitor() {
-      builder_->current_function_builder_->EditVarIntImmediate(
-          index_, builder_->block_size_);
-      builder_->block_size_ = prev_block_size_;
+      builder_->current_function_builder_->Emit(kExprEnd);
       builder_->breakable_blocks_.pop_back();
     }
   };
@@ -160,25 +155,24 @@
   void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
 
   void VisitIfStatement(IfStatement* stmt) {
-    DCHECK(in_function_);
-    if (stmt->HasElseStatement()) {
-      current_function_builder_->Emit(kExprIfElse);
-    } else {
-      current_function_builder_->Emit(kExprIf);
-    }
+    DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(stmt->condition()));
+    current_function_builder_->Emit(kExprIf);
+    // WASM ifs come with implicit blocks for both arms.
+    breakable_blocks_.push_back(std::make_pair(nullptr, false));
     if (stmt->HasThenStatement()) {
       RECURSE(Visit(stmt->then_statement()));
-    } else {
-      current_function_builder_->Emit(kExprNop);
     }
     if (stmt->HasElseStatement()) {
+      current_function_builder_->Emit(kExprElse);
       RECURSE(Visit(stmt->else_statement()));
     }
+    current_function_builder_->Emit(kExprEnd);
+    breakable_blocks_.pop_back();
   }
 
   void VisitContinueStatement(ContinueStatement* stmt) {
-    DCHECK(in_function_);
+    DCHECK_EQ(kFuncScope, scope_);
     DCHECK_NOT_NULL(stmt->target());
     int i = static_cast<int>(breakable_blocks_.size()) - 1;
     int block_distance = 0;
@@ -194,12 +188,12 @@
       }
     }
     DCHECK(i >= 0);
-    current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
-    current_function_builder_->Emit(kExprNop);
+    current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+    current_function_builder_->EmitVarInt(block_distance);
   }
 
   void VisitBreakStatement(BreakStatement* stmt) {
-    DCHECK(in_function_);
+    DCHECK_EQ(kFuncScope, scope_);
     DCHECK_NOT_NULL(stmt->target());
     int i = static_cast<int>(breakable_blocks_.size()) - 1;
     int block_distance = 0;
@@ -217,123 +211,191 @@
       }
     }
     DCHECK(i >= 0);
-    current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
-    current_function_builder_->Emit(kExprNop);
+    current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+    current_function_builder_->EmitVarInt(block_distance);
   }
 
   void VisitReturnStatement(ReturnStatement* stmt) {
-    if (in_function_) {
-      current_function_builder_->Emit(kExprReturn);
+    if (scope_ == kModuleScope) {
+      scope_ = kExportScope;
+      RECURSE(Visit(stmt->expression()));
+      scope_ = kModuleScope;
+    } else if (scope_ == kFuncScope) {
+      RECURSE(Visit(stmt->expression()));
+      uint8_t arity =
+          TypeOf(stmt->expression()) == kAstStmt ? ARITY_0 : ARITY_1;
+      current_function_builder_->EmitWithU8(kExprReturn, arity);
     } else {
-      marking_exported = true;
-    }
-    RECURSE(Visit(stmt->expression()));
-    if (!in_function_) {
-      marking_exported = false;
+      UNREACHABLE();
     }
   }
 
   void VisitWithStatement(WithStatement* stmt) { UNREACHABLE(); }
 
-  void SetLocalTo(uint16_t index, int value) {
-    current_function_builder_->Emit(kExprSetLocal);
-    AddLeb128(index, true);
-    // TODO(bradnelson): variable size
-    byte code[] = {WASM_I32V(value)};
-    current_function_builder_->EmitCode(code, sizeof(code));
-    block_size_++;
-  }
+  void HandleCase(CaseNode* node,
+                  const ZoneMap<int, unsigned int>& case_to_block,
+                  VariableProxy* tag, int default_block, int if_depth) {
+    int prev_if_depth = if_depth;
+    if (node->left != nullptr) {
+      VisitVariableProxy(tag);
+      current_function_builder_->EmitI32Const(node->begin);
+      current_function_builder_->Emit(kExprI32LtS);
+      current_function_builder_->Emit(kExprIf);
+      if_depth++;
+      breakable_blocks_.push_back(std::make_pair(nullptr, false));
+      HandleCase(node->left, case_to_block, tag, default_block, if_depth);
+      current_function_builder_->Emit(kExprElse);
+    }
+    if (node->right != nullptr) {
+      VisitVariableProxy(tag);
+      current_function_builder_->EmitI32Const(node->end);
+      current_function_builder_->Emit(kExprI32GtS);
+      current_function_builder_->Emit(kExprIf);
+      if_depth++;
+      breakable_blocks_.push_back(std::make_pair(nullptr, false));
+      HandleCase(node->right, case_to_block, tag, default_block, if_depth);
+      current_function_builder_->Emit(kExprElse);
+    }
+    if (node->begin == node->end) {
+      VisitVariableProxy(tag);
+      current_function_builder_->EmitI32Const(node->begin);
+      current_function_builder_->Emit(kExprI32Eq);
+      current_function_builder_->Emit(kExprIf);
+      DCHECK(case_to_block.find(node->begin) != case_to_block.end());
+      current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+      current_function_builder_->EmitVarInt(1 + if_depth +
+                                            case_to_block.at(node->begin));
+      current_function_builder_->Emit(kExprEnd);
+    } else {
+      if (node->begin != 0) {
+        VisitVariableProxy(tag);
+        current_function_builder_->EmitI32Const(node->begin);
+        current_function_builder_->Emit(kExprI32Sub);
+      } else {
+        VisitVariableProxy(tag);
+      }
+      current_function_builder_->EmitWithU8(kExprBrTable, ARITY_0);
+      current_function_builder_->EmitVarInt(node->end - node->begin + 1);
+      for (int v = node->begin; v <= node->end; v++) {
+        if (case_to_block.find(v) != case_to_block.end()) {
+          byte break_code[] = {BR_TARGET(if_depth + case_to_block.at(v))};
+          current_function_builder_->EmitCode(break_code, sizeof(break_code));
+        } else {
+          byte break_code[] = {BR_TARGET(if_depth + default_block)};
+          current_function_builder_->EmitCode(break_code, sizeof(break_code));
+        }
+        if (v == kMaxInt) {
+          break;
+        }
+      }
+      byte break_code[] = {BR_TARGET(if_depth + default_block)};
+      current_function_builder_->EmitCode(break_code, sizeof(break_code));
+    }
 
-  void CompileCase(CaseClause* clause, uint16_t fall_through,
-                   VariableProxy* tag) {
-    Literal* label = clause->label()->AsLiteral();
-    DCHECK_NOT_NULL(label);
-    block_size_++;
-    current_function_builder_->Emit(kExprIf);
-    current_function_builder_->Emit(kExprI32Ior);
-    current_function_builder_->Emit(kExprI32Eq);
-    VisitVariableProxy(tag);
-    VisitLiteral(label);
-    current_function_builder_->Emit(kExprGetLocal);
-    AddLeb128(fall_through, true);
-    BlockVisitor visitor(this, nullptr, kExprBlock, false, 0);
-    SetLocalTo(fall_through, 1);
-    ZoneList<Statement*>* stmts = clause->statements();
-    block_size_ += stmts->length();
-    RECURSE(VisitStatements(stmts));
+    while (if_depth-- != prev_if_depth) {
+      breakable_blocks_.pop_back();
+      current_function_builder_->Emit(kExprEnd);
+    }
   }
 
   void VisitSwitchStatement(SwitchStatement* stmt) {
     VariableProxy* tag = stmt->tag()->AsVariableProxy();
     DCHECK_NOT_NULL(tag);
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
-                         0);
-    uint16_t fall_through = current_function_builder_->AddLocal(kAstI32);
-    SetLocalTo(fall_through, 0);
-
     ZoneList<CaseClause*>* clauses = stmt->cases();
-    for (int i = 0; i < clauses->length(); ++i) {
+    int case_count = clauses->length();
+    if (case_count == 0) {
+      return;
+    }
+    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false);
+    ZoneVector<BlockVisitor*> blocks(zone_);
+    ZoneVector<int32_t> cases(zone_);
+    ZoneMap<int, unsigned int> case_to_block(zone_);
+    bool has_default = false;
+    for (int i = case_count - 1; i >= 0; i--) {
       CaseClause* clause = clauses->at(i);
+      blocks.push_back(new BlockVisitor(this, nullptr, kExprBlock, false));
       if (!clause->is_default()) {
-        CompileCase(clause, fall_through, tag);
+        Literal* label = clause->label()->AsLiteral();
+        Handle<Object> value = label->value();
+        DCHECK(value->IsNumber() &&
+               bounds_->get(label).upper->Is(cache_.kAsmSigned));
+        int32_t label_value;
+        if (!value->ToInt32(&label_value)) {
+          UNREACHABLE();
+        }
+        case_to_block[label_value] = i;
+        cases.push_back(label_value);
       } else {
-        ZoneList<Statement*>* stmts = clause->statements();
-        block_size_ += stmts->length();
-        RECURSE(VisitStatements(stmts));
+        DCHECK_EQ(i, case_count - 1);
+        has_default = true;
       }
     }
+    if (!has_default || case_count > 1) {
+      int default_block = has_default ? case_count - 1 : case_count;
+      BlockVisitor switch_logic_block(this, nullptr, kExprBlock, false);
+      CaseNode* root = OrderCases(&cases, zone_);
+      HandleCase(root, case_to_block, tag, default_block, 0);
+      if (root->left != nullptr || root->right != nullptr ||
+          root->begin == root->end) {
+        current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+        current_function_builder_->EmitVarInt(default_block);
+      }
+    }
+    for (int i = 0; i < case_count; i++) {
+      CaseClause* clause = clauses->at(i);
+      RECURSE(VisitStatements(clause->statements()));
+      BlockVisitor* v = blocks.at(case_count - i - 1);
+      blocks.pop_back();
+      delete v;
+    }
   }
 
   void VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
 
   void VisitDoWhileStatement(DoWhileStatement* stmt) {
-    DCHECK(in_function_);
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
-                         2);
+    DCHECK_EQ(kFuncScope, scope_);
+    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
     RECURSE(Visit(stmt->body()));
-    current_function_builder_->Emit(kExprIf);
     RECURSE(Visit(stmt->cond()));
-    current_function_builder_->EmitWithVarInt(kExprBr, 0);
-    current_function_builder_->Emit(kExprNop);
+    current_function_builder_->Emit(kExprIf);
+    current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+    current_function_builder_->Emit(kExprEnd);
   }
 
   void VisitWhileStatement(WhileStatement* stmt) {
-    DCHECK(in_function_);
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
-                         1);
-    current_function_builder_->Emit(kExprIf);
+    DCHECK_EQ(kFuncScope, scope_);
+    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
     RECURSE(Visit(stmt->cond()));
-    current_function_builder_->EmitWithVarInt(kExprBr, 0);
+    breakable_blocks_.push_back(std::make_pair(nullptr, false));
+    current_function_builder_->Emit(kExprIf);
     RECURSE(Visit(stmt->body()));
+    current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+    current_function_builder_->Emit(kExprEnd);
+    breakable_blocks_.pop_back();
   }
 
   void VisitForStatement(ForStatement* stmt) {
-    DCHECK(in_function_);
+    DCHECK_EQ(kFuncScope, scope_);
     if (stmt->init() != nullptr) {
-      block_size_++;
       RECURSE(Visit(stmt->init()));
     }
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
-                         0);
+    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
     if (stmt->cond() != nullptr) {
-      block_size_++;
-      current_function_builder_->Emit(kExprIf);
-      current_function_builder_->Emit(kExprI32Eqz);
       RECURSE(Visit(stmt->cond()));
-      current_function_builder_->EmitWithVarInt(kExprBr, 1);
+      current_function_builder_->Emit(kExprI32Eqz);
+      current_function_builder_->Emit(kExprIf);
       current_function_builder_->Emit(kExprNop);
+      current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 2);
+      current_function_builder_->Emit(kExprEnd);
     }
     if (stmt->body() != nullptr) {
-      block_size_++;
       RECURSE(Visit(stmt->body()));
     }
     if (stmt->next() != nullptr) {
-      block_size_++;
       RECURSE(Visit(stmt->next()));
     }
-    block_size_++;
-    current_function_builder_->EmitWithVarInt(kExprBr, 0);
     current_function_builder_->Emit(kExprNop);
+    current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 0);
   }
 
   void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
@@ -348,16 +410,21 @@
 
   void VisitFunctionLiteral(FunctionLiteral* expr) {
     Scope* scope = expr->scope();
-    if (in_function_) {
-      if (expr->bounds().lower->IsFunction()) {
-        FunctionType* func_type = expr->bounds().lower->AsFunction();
+    if (scope_ == kFuncScope) {
+      if (bounds_->get(expr).lower->IsFunction()) {
+        // Build the signature for the function.
+        FunctionType* func_type = bounds_->get(expr).lower->AsFunction();
         LocalType return_type = TypeFrom(func_type->Result());
-        current_function_builder_->ReturnType(return_type);
+        FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
+                               func_type->Arity());
+        if (return_type != kAstStmt) b.AddReturn(return_type);
         for (int i = 0; i < expr->parameter_count(); i++) {
           LocalType type = TypeFrom(func_type->Parameter(i));
           DCHECK_NE(kAstStmt, type);
-          LookupOrInsertLocal(scope->parameter(i), type);
+          b.AddParam(type);
+          InsertParameter(scope->parameter(i), type, i);
         }
+        current_function_builder_->SetSignature(b.Build());
       } else {
         UNREACHABLE();
       }
@@ -371,11 +438,16 @@
   }
 
   void VisitConditional(Conditional* expr) {
-    DCHECK(in_function_);
-    current_function_builder_->Emit(kExprIfElse);
+    DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(expr->condition()));
+    // WASM ifs come with implicit blocks for both arms.
+    breakable_blocks_.push_back(std::make_pair(nullptr, false));
+    current_function_builder_->Emit(kExprIf);
     RECURSE(Visit(expr->then_expression()));
+    current_function_builder_->Emit(kExprElse);
     RECURSE(Visit(expr->else_expression()));
+    current_function_builder_->Emit(kExprEnd);
+    breakable_blocks_.pop_back();
   }
 
   bool VisitStdlibConstant(Variable* var) {
@@ -431,41 +503,29 @@
   }
 
   void VisitVariableProxy(VariableProxy* expr) {
-    if (in_function_) {
+    if (scope_ == kFuncScope || scope_ == kInitScope) {
       Variable* var = expr->var();
-      if (is_set_op_) {
-        if (var->IsContextSlot()) {
-          current_function_builder_->Emit(kExprStoreGlobal);
-        } else {
-          current_function_builder_->Emit(kExprSetLocal);
-        }
-        is_set_op_ = false;
-      } else {
-        if (VisitStdlibConstant(var)) {
-          return;
-        }
-        if (var->IsContextSlot()) {
-          current_function_builder_->Emit(kExprLoadGlobal);
-        } else {
-          current_function_builder_->Emit(kExprGetLocal);
-        }
+      if (VisitStdlibConstant(var)) {
+        return;
       }
       LocalType var_type = TypeOf(expr);
       DCHECK_NE(kAstStmt, var_type);
       if (var->IsContextSlot()) {
-        AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+        current_function_builder_->EmitWithVarInt(
+            kExprLoadGlobal, LookupOrInsertGlobal(var, var_type));
       } else {
-        AddLeb128(LookupOrInsertLocal(var, var_type), true);
+        current_function_builder_->EmitGetLocal(
+            LookupOrInsertLocal(var, var_type));
       }
     }
   }
 
   void VisitLiteral(Literal* expr) {
     Handle<Object> value = expr->value();
-    if (!in_function_ || !value->IsNumber()) {
+    if (!value->IsNumber() || (scope_ != kFuncScope && scope_ != kInitScope)) {
       return;
     }
-    Type* type = expr->bounds().upper;
+    Type* type = bounds_->get(expr).upper;
     if (type->Is(cache_.kAsmSigned)) {
       int32_t i = 0;
       if (!value->ToInt32(&i)) {
@@ -496,7 +556,7 @@
     ZoneList<ObjectLiteralProperty*>* props = expr->properties();
     for (int i = 0; i < props->length(); ++i) {
       ObjectLiteralProperty* prop = props->at(i);
-      DCHECK(marking_exported);
+      DCHECK_EQ(kExportScope, scope_);
       VariableProxy* expr = prop->value()->AsVariableProxy();
       DCHECK_NOT_NULL(expr);
       Variable* var = expr->var();
@@ -505,10 +565,11 @@
       DCHECK(name->IsPropertyName());
       const AstRawString* raw_name = name->AsRawPropertyName();
       if (var->is_function()) {
-        uint16_t index = LookupOrInsertFunction(var);
+        uint32_t index = LookupOrInsertFunction(var);
         builder_->FunctionAt(index)->Exported(1);
-        builder_->FunctionAt(index)
-            ->SetName(raw_name->raw_data(), raw_name->length());
+        builder_->FunctionAt(index)->SetName(
+            reinterpret_cast<const char*>(raw_name->raw_data()),
+            raw_name->length());
       }
     }
   }
@@ -517,17 +578,17 @@
 
   void LoadInitFunction() {
     current_function_builder_ = builder_->FunctionAt(init_function_index_);
-    in_function_ = true;
+    scope_ = kInitScope;
   }
 
   void UnLoadInitFunction() {
-    in_function_ = false;
+    scope_ = kModuleScope;
     current_function_builder_ = nullptr;
   }
 
   void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
     FunctionType* func_type =
-        funcs->bounds().lower->AsArray()->Element()->AsFunction();
+        bounds_->get(funcs).lower->AsArray()->Element()->AsFunction();
     LocalType return_type = TypeFrom(func_type->Result());
     FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
                              func_type->Arity());
@@ -537,7 +598,7 @@
     for (int i = 0; i < func_type->Arity(); i++) {
       sig.AddParam(TypeFrom(func_type->Parameter(i)));
     }
-    uint16_t signature_index = builder_->AddSignature(sig.Build());
+    uint32_t signature_index = builder_->AddSignature(sig.Build());
     InsertFunctionTable(table->var(), next_table_index_, signature_index);
     next_table_index_ += funcs->values()->length();
     for (int i = 0; i < funcs->values()->length(); i++) {
@@ -549,11 +610,11 @@
 
   struct FunctionTableIndices : public ZoneObject {
     uint32_t start_index;
-    uint16_t signature_index;
+    uint32_t signature_index;
   };
 
   void InsertFunctionTable(Variable* v, uint32_t start_index,
-                           uint16_t signature_index) {
+                           uint32_t signature_index) {
     FunctionTableIndices* container = new (zone()) FunctionTableIndices();
     container->start_index = start_index;
     container->signature_index = signature_index;
@@ -573,12 +634,11 @@
    private:
     class ImportedFunctionIndices : public ZoneObject {
      public:
-      const unsigned char* name_;
+      const char* name_;
       int name_length_;
       WasmModuleBuilder::SignatureMap signature_to_index_;
 
-      ImportedFunctionIndices(const unsigned char* name, int name_length,
-                              Zone* zone)
+      ImportedFunctionIndices(const char* name, int name_length, Zone* zone)
           : name_(name), name_length_(name_length), signature_to_index_(zone) {}
     };
     ZoneHashMap table_;
@@ -590,7 +650,7 @@
                  ZoneAllocationPolicy(builder->zone())),
           builder_(builder) {}
 
-    void AddImport(Variable* v, const unsigned char* name, int name_length) {
+    void AddImport(Variable* v, const char* name, int name_length) {
       ImportedFunctionIndices* indices = new (builder_->zone())
           ImportedFunctionIndices(name, name_length, builder_->zone());
       ZoneHashMap::Entry* entry = table_.LookupOrInsert(
@@ -598,7 +658,7 @@
       entry->value = indices;
     }
 
-    uint16_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
+    uint32_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
       ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
       DCHECK_NOT_NULL(entry);
       ImportedFunctionIndices* indices =
@@ -608,60 +668,137 @@
       if (pos != indices->signature_to_index_.end()) {
         return pos->second;
       } else {
-        uint16_t index = builder_->builder_->AddFunction();
+        uint32_t index = builder_->builder_->AddImport(
+            indices->name_, indices->name_length_, sig);
         indices->signature_to_index_[sig] = index;
-        WasmFunctionBuilder* function = builder_->builder_->FunctionAt(index);
-        function->External(1);
-        function->SetName(indices->name_, indices->name_length_);
-        if (sig->return_count() > 0) {
-          function->ReturnType(sig->GetReturn());
-        }
-        for (size_t i = 0; i < sig->parameter_count(); i++) {
-          function->AddParam(sig->GetParam(i));
-        }
         return index;
       }
     }
   };
 
-  void VisitAssignment(Assignment* expr) {
-    bool in_init = false;
-    if (!in_function_) {
-      BinaryOperation* binop = expr->value()->AsBinaryOperation();
-      if (binop != nullptr) {
+  void EmitAssignmentLhs(Expression* target, MachineType* mtype) {
+    // Match the left hand side of the assignment.
+    VariableProxy* target_var = target->AsVariableProxy();
+    if (target_var != nullptr) {
+      // Left hand side is a local or a global variable, no code on LHS.
+      return;
+    }
+
+    Property* target_prop = target->AsProperty();
+    if (target_prop != nullptr) {
+      // Left hand side is a property access, i.e. the asm.js heap.
+      VisitPropertyAndEmitIndex(target_prop, mtype);
+      return;
+    }
+
+    if (target_var == nullptr && target_prop == nullptr) {
+      UNREACHABLE();  // invalid assignment.
+    }
+  }
+
+  void EmitAssignmentRhs(Expression* target, Expression* value, bool* is_nop) {
+    BinaryOperation* binop = value->AsBinaryOperation();
+    if (binop != nullptr) {
+      if (scope_ == kInitScope) {
+        // Handle foreign variables in the initialization scope.
         Property* prop = binop->left()->AsProperty();
-        DCHECK_NOT_NULL(prop);
-        LoadInitFunction();
-        is_set_op_ = true;
-        RECURSE(Visit(expr->target()));
-        DCHECK(!is_set_op_);
         if (binop->op() == Token::MUL) {
           DCHECK(binop->right()->IsLiteral());
           DCHECK_EQ(1.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
           DCHECK(binop->right()->AsLiteral()->raw_value()->ContainsDot());
           VisitForeignVariable(true, prop);
+          return;
         } else if (binop->op() == Token::BIT_OR) {
           DCHECK(binop->right()->IsLiteral());
           DCHECK_EQ(0.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
           DCHECK(!binop->right()->AsLiteral()->raw_value()->ContainsDot());
           VisitForeignVariable(false, prop);
+          return;
         } else {
           UNREACHABLE();
         }
-        UnLoadInitFunction();
-        return;
       }
+      if (MatchBinaryOperation(binop) == kAsIs) {
+        VariableProxy* target_var = target->AsVariableProxy();
+        VariableProxy* effective_value_var = GetLeft(binop)->AsVariableProxy();
+        if (target_var != nullptr && effective_value_var != nullptr &&
+            target_var->var() == effective_value_var->var()) {
+          *is_nop = true;
+          return;
+        }
+      }
+    }
+    RECURSE(Visit(value));
+  }
+
+  void EmitAssignment(Assignment* expr, MachineType type) {
+    // Match the left hand side of the assignment.
+    VariableProxy* target_var = expr->target()->AsVariableProxy();
+    if (target_var != nullptr) {
+      // Left hand side is a local or a global variable.
+      Variable* var = target_var->var();
+      LocalType var_type = TypeOf(expr);
+      DCHECK_NE(kAstStmt, var_type);
+      if (var->IsContextSlot()) {
+        current_function_builder_->EmitWithVarInt(
+            kExprStoreGlobal, LookupOrInsertGlobal(var, var_type));
+      } else {
+        current_function_builder_->EmitSetLocal(
+            LookupOrInsertLocal(var, var_type));
+      }
+    }
+
+    Property* target_prop = expr->target()->AsProperty();
+    if (target_prop != nullptr) {
+      // Left hand side is a property access, i.e. the asm.js heap.
+      if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
+          bounds_->get(expr->target()->AsProperty()->obj())
+              .lower->Is(cache_.kFloat32Array)) {
+        current_function_builder_->Emit(kExprF32ConvertF64);
+      }
+      WasmOpcode opcode;
+      if (type == MachineType::Int8()) {
+        opcode = kExprI32AsmjsStoreMem8;
+      } else if (type == MachineType::Uint8()) {
+        opcode = kExprI32AsmjsStoreMem8;
+      } else if (type == MachineType::Int16()) {
+        opcode = kExprI32AsmjsStoreMem16;
+      } else if (type == MachineType::Uint16()) {
+        opcode = kExprI32AsmjsStoreMem16;
+      } else if (type == MachineType::Int32()) {
+        opcode = kExprI32AsmjsStoreMem;
+      } else if (type == MachineType::Uint32()) {
+        opcode = kExprI32AsmjsStoreMem;
+      } else if (type == MachineType::Float32()) {
+        opcode = kExprF32AsmjsStoreMem;
+      } else if (type == MachineType::Float64()) {
+        opcode = kExprF64AsmjsStoreMem;
+      } else {
+        UNREACHABLE();
+      }
+      current_function_builder_->Emit(opcode);
+    }
+
+    if (target_var == nullptr && target_prop == nullptr) {
+      UNREACHABLE();  // invalid assignment.
+    }
+  }
+
+  void VisitAssignment(Assignment* expr) {
+    bool as_init = false;
+    if (scope_ == kModuleScope) {
       Property* prop = expr->value()->AsProperty();
       if (prop != nullptr) {
         VariableProxy* vp = prop->obj()->AsVariableProxy();
         if (vp != nullptr && vp->var()->IsParameter() &&
             vp->var()->index() == 1) {
           VariableProxy* target = expr->target()->AsVariableProxy();
-          if (target->bounds().lower->Is(Type::Function())) {
+          if (bounds_->get(target).lower->Is(Type::Function())) {
             const AstRawString* name =
                 prop->key()->AsLiteral()->AsRawPropertyName();
-            imported_function_table_.AddImport(target->var(), name->raw_data(),
-                                               name->length());
+            imported_function_table_.AddImport(
+                target->var(), reinterpret_cast<const char*>(name->raw_data()),
+                name->length());
           }
         }
         // Property values in module scope don't emit code, so return.
@@ -669,7 +806,7 @@
       }
       ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
       if (funcs != nullptr &&
-          funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
+          bounds_->get(funcs).lower->AsArray()->Element()->IsFunction()) {
         VariableProxy* target = expr->target()->AsVariableProxy();
         DCHECK_NOT_NULL(target);
         AddFunctionTable(target, funcs);
@@ -680,32 +817,18 @@
         // No init code to emit for CallNew nodes.
         return;
       }
-      in_init = true;
-      LoadInitFunction();
+      as_init = true;
     }
-    BinaryOperation* value_op = expr->value()->AsBinaryOperation();
-    if (value_op != nullptr && MatchBinaryOperation(value_op) == kAsIs) {
-      VariableProxy* target_var = expr->target()->AsVariableProxy();
-      VariableProxy* effective_value_var = GetLeft(value_op)->AsVariableProxy();
-      if (target_var != nullptr && effective_value_var != nullptr &&
-          target_var->var() == effective_value_var->var()) {
-        block_size_--;
-        return;
-      }
+
+    if (as_init) LoadInitFunction();
+    MachineType mtype;
+    bool is_nop = false;
+    EmitAssignmentLhs(expr->target(), &mtype);
+    EmitAssignmentRhs(expr->target(), expr->value(), &is_nop);
+    if (!is_nop) {
+      EmitAssignment(expr, mtype);
     }
-    is_set_op_ = true;
-    RECURSE(Visit(expr->target()));
-    DCHECK(!is_set_op_);
-    // Assignment to heapf32 from float64 converts.
-    if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
-        expr->target()->AsProperty()->obj()->bounds().lower->Is(
-            cache_.kFloat32Array)) {
-      current_function_builder_->Emit(kExprF32ConvertF64);
-    }
-    RECURSE(Visit(expr->value()));
-    if (in_init) {
-      UnLoadInitFunction();
-    }
+    if (as_init) UnLoadInitFunction();
   }
 
   void VisitYield(Yield* expr) { UNREACHABLE(); }
@@ -744,9 +867,7 @@
             Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
             if (nvalue->IsNumber()) {
               int32_t val = static_cast<int32_t>(nvalue->Number());
-              // TODO(bradnelson): variable size
-              byte code[] = {WASM_I32V(val)};
-              current_function_builder_->EmitCode(code, sizeof(code));
+              current_function_builder_->EmitI32Const(val);
               return;
             }
           }
@@ -762,46 +883,41 @@
     }
   }
 
-  void VisitProperty(Property* expr) {
+  void VisitPropertyAndEmitIndex(Property* expr, MachineType* mtype) {
     Expression* obj = expr->obj();
-    DCHECK_EQ(obj->bounds().lower, obj->bounds().upper);
-    Type* type = obj->bounds().lower;
-    MachineType mtype;
+    DCHECK_EQ(bounds_->get(obj).lower, bounds_->get(obj).upper);
+    Type* type = bounds_->get(obj).lower;
     int size;
     if (type->Is(cache_.kUint8Array)) {
-      mtype = MachineType::Uint8();
+      *mtype = MachineType::Uint8();
       size = 1;
     } else if (type->Is(cache_.kInt8Array)) {
-      mtype = MachineType::Int8();
+      *mtype = MachineType::Int8();
       size = 1;
     } else if (type->Is(cache_.kUint16Array)) {
-      mtype = MachineType::Uint16();
+      *mtype = MachineType::Uint16();
       size = 2;
     } else if (type->Is(cache_.kInt16Array)) {
-      mtype = MachineType::Int16();
+      *mtype = MachineType::Int16();
       size = 2;
     } else if (type->Is(cache_.kUint32Array)) {
-      mtype = MachineType::Uint32();
+      *mtype = MachineType::Uint32();
       size = 4;
     } else if (type->Is(cache_.kInt32Array)) {
-      mtype = MachineType::Int32();
+      *mtype = MachineType::Int32();
       size = 4;
     } else if (type->Is(cache_.kUint32Array)) {
-      mtype = MachineType::Uint32();
+      *mtype = MachineType::Uint32();
       size = 4;
     } else if (type->Is(cache_.kFloat32Array)) {
-      mtype = MachineType::Float32();
+      *mtype = MachineType::Float32();
       size = 4;
     } else if (type->Is(cache_.kFloat64Array)) {
-      mtype = MachineType::Float64();
+      *mtype = MachineType::Float64();
       size = 8;
     } else {
       UNREACHABLE();
     }
-    // TODO(titzer): use special asm-compatibility opcodes?
-    current_function_builder_->EmitWithU8U8(
-        WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_), 0, 0);
-    is_set_op_ = false;
     if (size == 1) {
       // Allow more general expression in byte arrays than the spec
       // strictly permits.
@@ -809,87 +925,123 @@
       // places that strictly should be HEAP8[HEAP32[..]>>0].
       RECURSE(Visit(expr->key()));
       return;
-    } else {
-      Literal* value = expr->key()->AsLiteral();
-      if (value) {
-        DCHECK(value->raw_value()->IsNumber());
-        DCHECK_EQ(kAstI32, TypeOf(value));
-        int val = static_cast<int>(value->raw_value()->AsNumber());
-        // TODO(bradnelson): variable size
-        byte code[] = {WASM_I32V(val * size)};
-        current_function_builder_->EmitCode(code, sizeof(code));
-        return;
-      }
-      BinaryOperation* binop = expr->key()->AsBinaryOperation();
-      if (binop) {
-        DCHECK_EQ(Token::SAR, binop->op());
-        DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
-        DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
-        DCHECK_EQ(size,
-                  1 << static_cast<int>(
-                      binop->right()->AsLiteral()->raw_value()->AsNumber()));
-        // Mask bottom bits to match asm.js behavior.
-        current_function_builder_->Emit(kExprI32And);
-        byte code[] = {WASM_I8(~(size - 1))};
-        current_function_builder_->EmitCode(code, sizeof(code));
-        RECURSE(Visit(binop->left()));
-        return;
-      }
+    }
+
+    Literal* value = expr->key()->AsLiteral();
+    if (value) {
+      DCHECK(value->raw_value()->IsNumber());
+      DCHECK_EQ(kAstI32, TypeOf(value));
+      int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber());
+      // TODO(titzer): handle overflow here.
+      current_function_builder_->EmitI32Const(val * size);
+      return;
+    }
+    BinaryOperation* binop = expr->key()->AsBinaryOperation();
+    if (binop) {
+      DCHECK_EQ(Token::SAR, binop->op());
+      DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
+      DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+      DCHECK_EQ(size,
+                1 << static_cast<int>(
+                    binop->right()->AsLiteral()->raw_value()->AsNumber()));
+      // Mask bottom bits to match asm.js behavior.
+      byte mask = static_cast<byte>(~(size - 1));
+      RECURSE(Visit(binop->left()));
+      current_function_builder_->EmitWithU8(kExprI8Const, mask);
+      current_function_builder_->Emit(kExprI32And);
+      return;
     }
     UNREACHABLE();
   }
 
+  void VisitProperty(Property* expr) {
+    MachineType type;
+    VisitPropertyAndEmitIndex(expr, &type);
+    WasmOpcode opcode;
+    if (type == MachineType::Int8()) {
+      opcode = kExprI32AsmjsLoadMem8S;
+    } else if (type == MachineType::Uint8()) {
+      opcode = kExprI32AsmjsLoadMem8U;
+    } else if (type == MachineType::Int16()) {
+      opcode = kExprI32AsmjsLoadMem16S;
+    } else if (type == MachineType::Uint16()) {
+      opcode = kExprI32AsmjsLoadMem16U;
+    } else if (type == MachineType::Int32()) {
+      opcode = kExprI32AsmjsLoadMem;
+    } else if (type == MachineType::Uint32()) {
+      opcode = kExprI32AsmjsLoadMem;
+    } else if (type == MachineType::Float32()) {
+      opcode = kExprF32AsmjsLoadMem;
+    } else if (type == MachineType::Float64()) {
+      opcode = kExprF64AsmjsLoadMem;
+    } else {
+      UNREACHABLE();
+    }
+
+    current_function_builder_->Emit(opcode);
+  }
+
   bool VisitStdlibFunction(Call* call, VariableProxy* expr) {
     Variable* var = expr->var();
     AsmTyper::StandardMember standard_object =
         typer_->VariableAsStandardMember(var);
     ZoneList<Expression*>* args = call->arguments();
     LocalType call_type = TypeOf(call);
+
     switch (standard_object) {
       case AsmTyper::kNone: {
         return false;
       }
       case AsmTyper::kMathAcos: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Acos);
         break;
       }
       case AsmTyper::kMathAsin: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Asin);
         break;
       }
       case AsmTyper::kMathAtan: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Atan);
         break;
       }
       case AsmTyper::kMathCos: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Cos);
         break;
       }
       case AsmTyper::kMathSin: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Sin);
         break;
       }
       case AsmTyper::kMathTan: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Tan);
         break;
       }
       case AsmTyper::kMathExp: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Exp);
         break;
       }
       case AsmTyper::kMathLog: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Log);
         break;
       }
       case AsmTyper::kMathCeil: {
+        VisitCallArgs(call);
         if (call_type == kAstF32) {
           current_function_builder_->Emit(kExprF32Ceil);
         } else if (call_type == kAstF64) {
@@ -900,6 +1052,7 @@
         break;
       }
       case AsmTyper::kMathFloor: {
+        VisitCallArgs(call);
         if (call_type == kAstF32) {
           current_function_builder_->Emit(kExprF32Floor);
         } else if (call_type == kAstF64) {
@@ -910,6 +1063,7 @@
         break;
       }
       case AsmTyper::kMathSqrt: {
+        VisitCallArgs(call);
         if (call_type == kAstF32) {
           current_function_builder_->Emit(kExprF32Sqrt);
         } else if (call_type == kAstF64) {
@@ -920,19 +1074,33 @@
         break;
       }
       case AsmTyper::kMathAbs: {
-        // TODO(bradnelson): Should this be cast to float?
         if (call_type == kAstI32) {
-          current_function_builder_->Emit(kExprIfElse);
-          current_function_builder_->Emit(kExprI32LtS);
-          Visit(args->at(0));
+          uint32_t tmp = current_function_builder_->AddLocal(kAstI32);
+
+          // if set_local(tmp, x) < 0
+          Visit(call->arguments()->at(0));
+          current_function_builder_->EmitSetLocal(tmp);
           byte code[] = {WASM_I8(0)};
           current_function_builder_->EmitCode(code, sizeof(code));
-          current_function_builder_->Emit(kExprI32Sub);
+          current_function_builder_->Emit(kExprI32LtS);
+          current_function_builder_->Emit(kExprIf);
+
+          // then (0 - tmp)
           current_function_builder_->EmitCode(code, sizeof(code));
-          Visit(args->at(0));
+          current_function_builder_->EmitGetLocal(tmp);
+          current_function_builder_->Emit(kExprI32Sub);
+
+          // else tmp
+          current_function_builder_->Emit(kExprElse);
+          current_function_builder_->EmitGetLocal(tmp);
+          // end
+          current_function_builder_->Emit(kExprEnd);
+
         } else if (call_type == kAstF32) {
+          VisitCallArgs(call);
           current_function_builder_->Emit(kExprF32Abs);
         } else if (call_type == kAstF64) {
+          VisitCallArgs(call);
           current_function_builder_->Emit(kExprF64Abs);
         } else {
           UNREACHABLE();
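
Note: the integer Math.abs case above spills the argument into a fresh local,
then branches. Modeled as source-level C++ (not V8 code, just the semantics of
the emitted sequence; wasm i32 arithmetic wraps on overflow):

    // Model of the kAstI32 Math.abs lowering.
    int32_t AsmjsAbs(int32_t x) {
      int32_t tmp = x;   // Visit(arg0); EmitSetLocal(tmp)
      if (tmp < 0) {     // WASM_I8(0); kExprI32LtS; kExprIf
        return 0 - tmp;  // then arm: 0 - tmp
      }
      return tmp;        // kExprElse: tmp; then kExprEnd
    }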
@@ -942,13 +1110,32 @@
       case AsmTyper::kMathMin: {
         // TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
         if (call_type == kAstI32) {
-          current_function_builder_->Emit(kExprIfElse);
+          uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
+          uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
+
+          // if set_local(tmp_x, x) <= set_local(tmp_y, y)
+          Visit(call->arguments()->at(0));
+          current_function_builder_->EmitSetLocal(tmp_x);
+
+          Visit(call->arguments()->at(1));
+          current_function_builder_->EmitSetLocal(tmp_y);
+
           current_function_builder_->Emit(kExprI32LeS);
-          Visit(args->at(0));
-          Visit(args->at(1));
+          current_function_builder_->Emit(kExprIf);
+
+          // then tmp_x
+          current_function_builder_->EmitGetLocal(tmp_x);
+
+          // else tmp_y
+          current_function_builder_->Emit(kExprElse);
+          current_function_builder_->EmitGetLocal(tmp_y);
+          current_function_builder_->Emit(kExprEnd);
+
         } else if (call_type == kAstF32) {
+          VisitCallArgs(call);
           current_function_builder_->Emit(kExprF32Min);
         } else if (call_type == kAstF64) {
+          VisitCallArgs(call);
           current_function_builder_->Emit(kExprF64Min);
         } else {
           UNREACHABLE();
@@ -958,13 +1145,33 @@
       case AsmTyper::kMathMax: {
         // TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
         if (call_type == kAstI32) {
-          current_function_builder_->Emit(kExprIfElse);
-          current_function_builder_->Emit(kExprI32GtS);
-          Visit(args->at(0));
-          Visit(args->at(1));
+          uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
+          uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
+
+          // if set_local(tmp_x, x) <= set_local(tmp_y, y)
+          Visit(call->arguments()->at(0));
+
+          current_function_builder_->EmitSetLocal(tmp_x);
+
+          Visit(call->arguments()->at(1));
+          current_function_builder_->EmitSetLocal(tmp_y);
+
+          current_function_builder_->Emit(kExprI32LeS);
+          current_function_builder_->Emit(kExprIf);
+
+          // then tmp_y
+          current_function_builder_->EmitGetLocal(tmp_y);
+
+          // else tmp_x
+          current_function_builder_->Emit(kExprElse);
+          current_function_builder_->EmitGetLocal(tmp_x);
+          current_function_builder_->Emit(kExprEnd);
+
         } else if (call_type == kAstF32) {
+          VisitCallArgs(call);
           current_function_builder_->Emit(kExprF32Max);
         } else if (call_type == kAstF64) {
+          VisitCallArgs(call);
           current_function_builder_->Emit(kExprF64Max);
         } else {
           UNREACHABLE();
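
Note: the integer Math.min/Math.max cases above follow the same
spill-then-branch pattern with two scratch locals. Modeled the same way:

    // Models of the kAstI32 lowerings above.
    int32_t AsmjsMin(int32_t x, int32_t y) {
      int32_t tx = x, ty = y;       // EmitSetLocal(tmp_x) / EmitSetLocal(tmp_y)
      return (tx <= ty) ? tx : ty;  // kExprI32LeS; kExprIf/kExprElse/kExprEnd
    }
    int32_t AsmjsMax(int32_t x, int32_t y) {
      int32_t tx = x, ty = y;
      return (tx <= ty) ? ty : tx;  // same compare; arms swapped to keep the max
    }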
@@ -972,16 +1179,19 @@
         break;
       }
       case AsmTyper::kMathAtan2: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Atan2);
         break;
       }
       case AsmTyper::kMathPow: {
+        VisitCallArgs(call);
         DCHECK_EQ(kAstF64, call_type);
         current_function_builder_->Emit(kExprF64Pow);
         break;
       }
       case AsmTyper::kMathImul: {
+        VisitCallArgs(call);
         current_function_builder_->Emit(kExprI32Mul);
         break;
       }
@@ -989,6 +1199,7 @@
         DCHECK(args->length() == 1);
         Literal* literal = args->at(0)->AsLiteral();
         if (literal != nullptr) {
+          // constant fold Math.fround(#const);
           if (literal->raw_value()->IsNumber()) {
             float val = static_cast<float>(literal->raw_value()->AsNumber());
             byte code[] = {WASM_F32(val)};
@@ -996,6 +1207,7 @@
             return true;
           }
         }
+        VisitCallArgs(call);
         switch (TypeIndexOf(args->at(0))) {
           case kInt32:
           case kFixnum:
@@ -1019,7 +1231,6 @@
         break;
       }
     }
-    VisitCallArgs(call);
     return true;
   }
 
@@ -1035,17 +1246,17 @@
     Call::CallType call_type = expr->GetCallType(isolate_);
     switch (call_type) {
       case Call::OTHER_CALL: {
-        DCHECK(in_function_);
+        DCHECK_EQ(kFuncScope, scope_);
         VariableProxy* proxy = expr->expression()->AsVariableProxy();
         if (proxy != nullptr) {
           if (VisitStdlibFunction(expr, proxy)) {
             return;
           }
         }
-        uint16_t index;
+        uint32_t index;
         VariableProxy* vp = expr->expression()->AsVariableProxy();
         if (vp != nullptr &&
-            Type::Any()->Is(vp->bounds().lower->AsFunction()->Result())) {
+            Type::Any()->Is(bounds_->get(vp).lower->AsFunction()->Result())) {
           LocalType return_type = TypeOf(expr);
           ZoneList<Expression*>* args = expr->arguments();
           FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
@@ -1058,35 +1269,38 @@
           }
           index =
               imported_function_table_.GetFunctionIndex(vp->var(), sig.Build());
+          VisitCallArgs(expr);
+          current_function_builder_->Emit(kExprCallImport);
+          current_function_builder_->EmitVarInt(expr->arguments()->length());
+          current_function_builder_->EmitVarInt(index);
         } else {
           index = LookupOrInsertFunction(vp->var());
+          VisitCallArgs(expr);
+          current_function_builder_->Emit(kExprCallFunction);
+          current_function_builder_->EmitVarInt(expr->arguments()->length());
+          current_function_builder_->EmitVarInt(index);
         }
-        current_function_builder_->Emit(kExprCallFunction);
-        std::vector<uint8_t> index_arr = UnsignedLEB128From(index);
-        current_function_builder_->EmitCode(
-            &index_arr[0], static_cast<uint32_t>(index_arr.size()));
         break;
       }
       case Call::KEYED_PROPERTY_CALL: {
-        DCHECK(in_function_);
+        DCHECK_EQ(kFuncScope, scope_);
         Property* p = expr->expression()->AsProperty();
         DCHECK_NOT_NULL(p);
         VariableProxy* var = p->obj()->AsVariableProxy();
         DCHECK_NOT_NULL(var);
         FunctionTableIndices* indices = LookupFunctionTable(var->var());
-        current_function_builder_->EmitWithVarInt(kExprCallIndirect,
-                                                  indices->signature_index);
-        current_function_builder_->Emit(kExprI32Add);
-        // TODO(bradnelson): variable size
-        byte code[] = {WASM_I32V(indices->start_index)};
-        current_function_builder_->EmitCode(code, sizeof(code));
         RECURSE(Visit(p->key()));
+        current_function_builder_->EmitI32Const(indices->start_index);
+        current_function_builder_->Emit(kExprI32Add);
+        VisitCallArgs(expr);
+        current_function_builder_->Emit(kExprCallIndirect);
+        current_function_builder_->EmitVarInt(expr->arguments()->length());
+        current_function_builder_->EmitVarInt(indices->signature_index);
         break;
       }
       default:
         UNREACHABLE();
     }
-    VisitCallArgs(expr);
   }
 
   void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
@@ -1094,6 +1308,7 @@
   void VisitCallRuntime(CallRuntime* expr) { UNREACHABLE(); }
 
   void VisitUnaryOperation(UnaryOperation* expr) {
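+    // Emit the operand first: in the postorder bytecode, operators follow
+    // their operands.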
+    RECURSE(Visit(expr->expression()));
     switch (expr->op()) {
       case Token::NOT: {
         DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
@@ -1103,7 +1318,6 @@
       default:
         UNREACHABLE();
     }
-    RECURSE(Visit(expr->expression()));
   }
 
   void VisitCountOperation(CountOperation* expr) { UNREACHABLE(); }
@@ -1207,9 +1421,6 @@
 #ifdef Mul
 #undef Mul
 #endif
-#ifdef Div
-#undef Div
-#endif
 
 #define NON_SIGNED_BINOP(op)      \
   static WasmOpcode opcodes[] = { \
@@ -1249,6 +1460,7 @@
   void VisitBinaryOperation(BinaryOperation* expr) {
     ConvertOperation convertOperation = MatchBinaryOperation(expr);
     if (convertOperation == kToDouble) {
+      RECURSE(Visit(expr->left()));
       TypeIndex type = TypeIndexOf(expr->left());
       if (type == kInt32 || type == kFixnum) {
         current_function_builder_->Emit(kExprF64SConvertI32);
@@ -1259,37 +1471,53 @@
       } else {
         UNREACHABLE();
       }
-      RECURSE(Visit(expr->left()));
     } else if (convertOperation == kToInt) {
+      RECURSE(Visit(GetLeft(expr)));
       TypeIndex type = TypeIndexOf(GetLeft(expr));
       if (type == kFloat32) {
-        current_function_builder_->Emit(kExprI32SConvertF32);
+        current_function_builder_->Emit(kExprI32AsmjsSConvertF32);
       } else if (type == kFloat64) {
-        current_function_builder_->Emit(kExprI32SConvertF64);
+        current_function_builder_->Emit(kExprI32AsmjsSConvertF64);
       } else {
         UNREACHABLE();
       }
-      RECURSE(Visit(GetLeft(expr)));
     } else if (convertOperation == kAsIs) {
       RECURSE(Visit(GetLeft(expr)));
     } else {
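+      // A comma expression evaluates both operands inside a block; the
+      // block's result is the value of the last expression.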
+      if (expr->op() == Token::COMMA) {
+        current_function_builder_->Emit(kExprBlock);
+      }
+
+      RECURSE(Visit(expr->left()));
+      RECURSE(Visit(expr->right()));
+
+      if (expr->op() == Token::COMMA) {
+        current_function_builder_->Emit(kExprEnd);
+      }
+
       switch (expr->op()) {
         BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
         BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
         BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
-        BINOP_CASE(Token::DIV, Div, SIGNED_BINOP, false);
         BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::BIT_AND, And, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::SHR, ShrU, NON_SIGNED_INT_BINOP, true);
+        case Token::DIV: {
+          static WasmOpcode opcodes[] = {kExprI32AsmjsDivS, kExprI32AsmjsDivU,
+                                         kExprF32Div, kExprF64Div};
+          int type = TypeIndexOf(expr->left(), expr->right(), false);
+          current_function_builder_->Emit(opcodes[type]);
+          break;
+        }
         case Token::MOD: {
           TypeIndex type = TypeIndexOf(expr->left(), expr->right(), false);
           if (type == kInt32) {
-            current_function_builder_->Emit(kExprI32RemS);
+            current_function_builder_->Emit(kExprI32AsmjsRemS);
           } else if (type == kUint32) {
-            current_function_builder_->Emit(kExprI32RemU);
+            current_function_builder_->Emit(kExprI32AsmjsRemU);
           } else if (type == kFloat64) {
             current_function_builder_->Emit(kExprF64Mod);
             return;
@@ -1299,31 +1527,17 @@
           break;
         }
         case Token::COMMA: {
-          current_function_builder_->EmitWithVarInt(kExprBlock, 2);
           break;
         }
         default:
           UNREACHABLE();
       }
-      RECURSE(Visit(expr->left()));
-      RECURSE(Visit(expr->right()));
-    }
-  }
-
-  void AddLeb128(uint32_t index, bool is_local) {
-    std::vector<uint8_t> index_vec = UnsignedLEB128From(index);
-    if (is_local) {
-      uint32_t pos_of_index[1] = {0};
-      current_function_builder_->EmitCode(
-          &index_vec[0], static_cast<uint32_t>(index_vec.size()), pos_of_index,
-          1);
-    } else {
-      current_function_builder_->EmitCode(
-          &index_vec[0], static_cast<uint32_t>(index_vec.size()));
     }
   }
 
   void VisitCompareOperation(CompareOperation* expr) {
+    RECURSE(Visit(expr->left()));
+    RECURSE(Visit(expr->right()));
     switch (expr->op()) {
       BINOP_CASE(Token::EQ, Eq, NON_SIGNED_BINOP, false);
       BINOP_CASE(Token::LT, Lt, SIGNED_BINOP, false);
@@ -1333,8 +1547,6 @@
       default:
         UNREACHABLE();
     }
-    RECURSE(Visit(expr->left()));
-    RECURSE(Visit(expr->right()));
   }
 
 #undef BINOP_CASE
@@ -1369,8 +1581,8 @@
   }
 
   TypeIndex TypeIndexOf(Expression* expr) {
-    DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
-    Type* type = expr->bounds().lower;
+    DCHECK_EQ(bounds_->get(expr).lower, bounds_->get(expr).upper);
+    Type* type = bounds_->get(expr).lower;
     if (type->Is(cache_.kAsmFixnum)) {
       return kFixnum;
     } else if (type->Is(cache_.kAsmSigned)) {
@@ -1422,20 +1634,17 @@
   void VisitRewritableExpression(RewritableExpression* expr) { UNREACHABLE(); }
 
   struct IndexContainer : public ZoneObject {
-    uint16_t index;
+    uint32_t index;
   };
 
-  uint16_t LookupOrInsertLocal(Variable* v, LocalType type) {
+  uint32_t LookupOrInsertLocal(Variable* v, LocalType type) {
     DCHECK_NOT_NULL(current_function_builder_);
     ZoneHashMap::Entry* entry =
         local_variables_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
-      uint16_t index;
-      if (v->IsParameter()) {
-        index = current_function_builder_->AddParam(type);
-      } else {
-        index = current_function_builder_->AddLocal(type);
-      }
+      uint32_t index;
+      DCHECK(!v->IsParameter());
+      index = current_function_builder_->AddLocal(type);
       IndexContainer* container = new (zone()) IndexContainer();
       container->index = index;
       entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
@@ -1445,11 +1654,24 @@
     return (reinterpret_cast<IndexContainer*>(entry->value))->index;
   }
 
-  uint16_t LookupOrInsertGlobal(Variable* v, LocalType type) {
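+  // Parameters are assigned fixed indices up front, so they are inserted
+  // into the map directly rather than created on first lookup.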
+  void InsertParameter(Variable* v, LocalType type, uint32_t index) {
+    DCHECK(v->IsParameter());
+    DCHECK_NOT_NULL(current_function_builder_);
+    ZoneHashMap::Entry* entry =
+        local_variables_.Lookup(v, ComputePointerHash(v));
+    DCHECK_NULL(entry);
+    IndexContainer* container = new (zone()) IndexContainer();
+    container->index = index;
+    entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
+                                            ZoneAllocationPolicy(zone()));
+    entry->value = container;
+  }
+
+  uint32_t LookupOrInsertGlobal(Variable* v, LocalType type) {
     ZoneHashMap::Entry* entry =
         global_variables_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
-      uint16_t index =
+      uint32_t index =
           builder_->AddGlobal(WasmOpcodes::MachineTypeFor(type), 0);
       IndexContainer* container = new (zone()) IndexContainer();
       container->index = index;
@@ -1460,11 +1682,11 @@
     return (reinterpret_cast<IndexContainer*>(entry->value))->index;
   }
 
-  uint16_t LookupOrInsertFunction(Variable* v) {
+  uint32_t LookupOrInsertFunction(Variable* v) {
     DCHECK_NOT_NULL(builder_);
     ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
-      uint16_t index = builder_->AddFunction();
+      uint32_t index = builder_->AddFunction();
       IndexContainer* container = new (zone()) IndexContainer();
       container->index = index;
       entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
@@ -1475,8 +1697,8 @@
   }
 
   LocalType TypeOf(Expression* expr) {
-    DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
-    return TypeFrom(expr->bounds().lower);
+    DCHECK_EQ(bounds_->get(expr).lower, bounds_->get(expr).upper);
+    return TypeFrom(bounds_->get(expr).lower);
   }
 
   LocalType TypeFrom(Type* type) {
@@ -1496,9 +1718,7 @@
   ZoneHashMap local_variables_;
   ZoneHashMap functions_;
   ZoneHashMap global_variables_;
-  bool in_function_;
-  bool is_set_op_;
-  bool marking_exported;
+  AsmScope scope_;
   WasmModuleBuilder* builder_;
   WasmFunctionBuilder* current_function_builder_;
   FunctionLiteral* literal_;
@@ -1508,11 +1728,11 @@
   AsmTyper* typer_;
   TypeCache const& cache_;
   ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
-  int block_size_;
-  uint16_t init_function_index_;
+  uint32_t init_function_index_;
   uint32_t next_table_index_;
   ZoneHashMap function_tables_;
   ImportedFunctionTable imported_function_table_;
+  const AstTypeBounds* bounds_;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
diff --git a/src/wasm/ast-decoder.cc b/src/wasm/ast-decoder.cc
index e2f6a04..b8a86c3 100644
--- a/src/wasm/ast-decoder.cc
+++ b/src/wasm/ast-decoder.cc
@@ -42,17 +42,6 @@
   WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc); }
 };
 
-// A production represents an incomplete decoded tree in the LR decoder.
-struct Production {
-  Tree* tree;  // the root of the syntax tree.
-  int index;   // the current index into the children of the tree.
-
-  WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc()); }
-  const byte* pc() const { return tree->pc; }
-  bool done() const { return index >= static_cast<int>(tree->count); }
-  Tree* last() const { return index > 0 ? tree->children[index - 1] : nullptr; }
-};
-
 // An SsaEnv environment carries the current local variable renaming
 // as well as the current effect and control dependency in the TF graph.
 // It maintains a control state that tracks whether the environment
@@ -72,19 +61,30 @@
     control = nullptr;
     effect = nullptr;
   }
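+  // Drops the merged state: a kMerged environment becomes plain kReached.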
+  void SetNotMerged() {
+    if (state == kMerged) state = kReached;
+  }
 };
 
-// An entry in the stack of blocks during decoding.
-struct Block {
-  SsaEnv* ssa_env;  // SSA renaming environment.
-  int stack_depth;  // production stack depth.
+// An entry on the value stack.
+struct Value {
+  const byte* pc;
+  TFNode* node;
+  LocalType type;
 };
 
-// An entry in the stack of ifs during decoding.
-struct IfEnv {
-  SsaEnv* false_env;
-  SsaEnv* merge_env;
-  SsaEnv** case_envs;
+// An entry on the control stack (i.e. if, block, loop).
+struct Control {
+  const byte* pc;
+  int stack_depth;    // stack height at the beginning of the construct.
+  SsaEnv* end_env;    // end environment for the construct.
+  SsaEnv* false_env;  // false environment (only for if).
+  TFNode* node;       // result node for the construct.
+  LocalType type;     // result type for the construct.
+  bool is_loop;       // true if this is the inner label of a loop.
+
+  bool is_if() { return *pc == kExprIf; }
+  bool is_block() { return *pc == kExprBlock; }
 };
 
 // Macros that build nodes only if there is a graph and the current SSA
@@ -157,30 +157,50 @@
     return false;
   }
 
-  inline bool Validate(const byte* pc, FunctionIndexOperand& operand) {
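+  // The explicit arity carried by a call opcode must match the arity of the
+  // callee's signature.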
+  inline bool Validate(const byte* pc, CallFunctionOperand& operand) {
     ModuleEnv* m = module_;
     if (m && m->module && operand.index < m->module->functions.size()) {
       operand.sig = m->module->functions[operand.index].sig;
+      uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
+      if (operand.arity != expected) {
+        error(pc, pc + 1,
+              "arity mismatch in direct function call (expected %u, got %u)",
+              expected, operand.arity);
+        return false;
+      }
       return true;
     }
     error(pc, pc + 1, "invalid function index");
     return false;
   }
 
-  inline bool Validate(const byte* pc, SignatureIndexOperand& operand) {
+  inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
     ModuleEnv* m = module_;
     if (m && m->module && operand.index < m->module->signatures.size()) {
       operand.sig = m->module->signatures[operand.index];
+      uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
+      if (operand.arity != expected) {
+        error(pc, pc + 1,
+              "arity mismatch in indirect function call (expected %u, got %u)",
+              expected, operand.arity);
+        return false;
+      }
       return true;
     }
     error(pc, pc + 1, "invalid signature index");
     return false;
   }
 
-  inline bool Validate(const byte* pc, ImportIndexOperand& operand) {
+  inline bool Validate(const byte* pc, CallImportOperand& operand) {
     ModuleEnv* m = module_;
     if (m && m->module && operand.index < m->module->import_table.size()) {
       operand.sig = m->module->import_table[operand.index].sig;
+      uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
+      if (operand.arity != expected) {
+        error(pc, pc + 1, "arity mismatch in import call (expected %u, got %u)",
+              expected, operand.arity);
+        return false;
+      }
       return true;
     }
     error(pc, pc + 1, "invalid signature index");
@@ -188,9 +208,13 @@
   }
 
   inline bool Validate(const byte* pc, BreakDepthOperand& operand,
-                       ZoneVector<Block>& blocks) {
-    if (operand.depth < blocks.size()) {
-      operand.target = &blocks[blocks.size() - operand.depth - 1];
+                       ZoneVector<Control>& control) {
+    if (operand.arity > 1) {
+      error(pc, pc + 1, "invalid arity for br or br_if");
+      return false;
+    }
+    if (operand.depth < control.size()) {
+      operand.target = &control[control.size() - operand.depth - 1];
       return true;
     }
     error(pc, pc + 1, "invalid break depth");
@@ -199,6 +223,10 @@
 
   bool Validate(const byte* pc, BranchTableOperand& operand,
                 size_t block_depth) {
+    if (operand.arity > 1) {
+      error(pc, pc + 1, "invalid arity for break");
+      return false;
+    }
     // Verify table.
     for (uint32_t i = 0; i < operand.table_count + 1; i++) {
       uint32_t target = operand.read_entry(this, i);
@@ -229,46 +257,49 @@
       case kExprLoadGlobal:
       case kExprNop:
       case kExprUnreachable:
+      case kExprEnd:
+      case kExprBlock:
+      case kExprLoop:
         return 0;
 
-      case kExprBr:
       case kExprStoreGlobal:
       case kExprSetLocal:
+      case kExprElse:
         return 1;
 
+      case kExprBr: {
+        BreakDepthOperand operand(this, pc);
+        return operand.arity;
+      }
+      case kExprBrIf: {
+        BreakDepthOperand operand(this, pc);
+        return 1 + operand.arity;
+      }
+      case kExprBrTable: {
+        BranchTableOperand operand(this, pc);
+        return 1 + operand.arity;
+      }
+
       case kExprIf:
-      case kExprBrIf:
-        return 2;
-      case kExprIfElse:
+        return 1;
       case kExprSelect:
         return 3;
 
-      case kExprBlock:
-      case kExprLoop: {
-        BlockCountOperand operand(this, pc);
-        return operand.count;
-      }
-
       case kExprCallFunction: {
-        FunctionIndexOperand operand(this, pc);
-        return static_cast<int>(
-            module_->GetFunctionSignature(operand.index)->parameter_count());
+        CallFunctionOperand operand(this, pc);
+        return operand.arity;
       }
       case kExprCallIndirect: {
-        SignatureIndexOperand operand(this, pc);
-        return 1 + static_cast<int>(
-                       module_->GetSignature(operand.index)->parameter_count());
+        CallIndirectOperand operand(this, pc);
+        return 1 + operand.arity;
       }
       case kExprCallImport: {
-        ImportIndexOperand operand(this, pc);
-        return static_cast<int>(
-            module_->GetImportSignature(operand.index)->parameter_count());
+        CallImportOperand operand(this, pc);
+        return operand.arity;
       }
       case kExprReturn: {
-        return static_cast<int>(sig_->return_count());
-      }
-      case kExprBrTable: {
-        return 1;
+        ReturnArityOperand operand(this, pc);
+        return operand.arity;
       }
 
 #define DECLARE_OPCODE_CASE(name, opcode, sig) \
@@ -281,7 +312,6 @@
         FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
         FOREACH_ASMJS_COMPAT_OPCODE(DECLARE_OPCODE_CASE)
 #undef DECLARE_OPCODE_CASE
-      case kExprDeclLocals:
       default:
         UNREACHABLE();
         return 0;
@@ -298,11 +328,6 @@
         MemoryAccessOperand operand(this, pc);
         return 1 + operand.length;
       }
-      case kExprBlock:
-      case kExprLoop: {
-        BlockCountOperand operand(this, pc);
-        return 1 + operand.length;
-      }
       case kExprBr:
       case kExprBrIf: {
         BreakDepthOperand operand(this, pc);
@@ -315,15 +340,15 @@
       }
 
       case kExprCallFunction: {
-        FunctionIndexOperand operand(this, pc);
+        CallFunctionOperand operand(this, pc);
         return 1 + operand.length;
       }
       case kExprCallIndirect: {
-        SignatureIndexOperand operand(this, pc);
+        CallIndirectOperand operand(this, pc);
         return 1 + operand.length;
       }
       case kExprCallImport: {
-        ImportIndexOperand operand(this, pc);
+        CallImportOperand operand(this, pc);
         return 1 + operand.length;
       }
 
@@ -350,6 +375,10 @@
         return 5;
       case kExprF64Const:
         return 9;
+      case kExprReturn: {
+        ReturnArityOperand operand(this, pc);
+        return 1 + operand.length;
+      }
 
       default:
         return 1;
@@ -357,7 +386,6 @@
   }
 };
 
-
 // A shift-reduce-parser strategy for decoding Wasm code that uses an explicit
 // shift-reduce strategy with multiple internal stacks.
 class SR_WasmDecoder : public WasmDecoder {
@@ -368,55 +396,62 @@
         builder_(builder),
         base_(body.base),
         local_type_vec_(zone),
-        trees_(zone),
         stack_(zone),
-        blocks_(zone),
-        ifs_(zone) {
+        control_(zone) {
     local_types_ = &local_type_vec_;
   }
 
-  TreeResult Decode() {
+  bool Decode() {
+    base::ElapsedTimer decode_timer;
+    if (FLAG_trace_wasm_decode_time) {
+      decode_timer.Start();
+    }
+    stack_.clear();
+    control_.clear();
+
     if (end_ < pc_) {
       error(pc_, "function body end < start");
-      return result_;
+      return false;
     }
 
     DecodeLocalDecls();
     InitSsaEnv();
     DecodeFunctionBody();
 
-    Tree* tree = nullptr;
-    if (ok()) {
-      if (ssa_env_->go()) {
-        if (stack_.size() > 0) {
-          error(stack_.back().pc(), end_, "fell off end of code");
-        }
-        AddImplicitReturnAtEnd();
-      }
-      if (trees_.size() == 0) {
-        if (sig_->return_count() > 0) {
-          error(start_, "no trees created");
-        }
-      } else {
-        tree = trees_[0];
-      }
+    if (failed()) return TraceFailed();
+
+    if (!control_.empty()) {
+      error(pc_, control_.back().pc, "unterminated control structure");
+      return TraceFailed();
     }
 
-    if (ok()) {
-      TRACE("wasm-decode ok\n");
+    if (ssa_env_->go()) {
+      TRACE("  @%-6d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+      DoReturn();
+      if (failed()) return TraceFailed();
+      TRACE("\n");
+    }
+
+    if (FLAG_trace_wasm_decode_time) {
+      double ms = decode_timer.Elapsed().InMillisecondsF();
+      PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
     } else {
-      TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
-            startrel(error_pc_), error_msg_.get());
+      TRACE("wasm-decode ok\n\n");
     }
 
-    return toResult(tree);
+    return true;
+  }
+
+  bool TraceFailed() {
+    TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
+          startrel(error_pc_), error_msg_.get());
+    return false;
   }
 
   bool DecodeLocalDecls(AstLocalDecls& decls) {
     DecodeLocalDecls();
     if (failed()) return false;
     decls.decls_encoded_size = pc_offset();
-    decls.total_local_count = 0;
     decls.local_types.reserve(local_type_vec_.size());
     for (size_t pos = 0; pos < local_type_vec_.size();) {
       uint32_t count = 0;
@@ -425,9 +460,9 @@
         pos++;
         count++;
       }
-      decls.total_local_count += count;
       decls.local_types.push_back(std::pair<LocalType, uint32_t>(type, count));
     }
+    decls.total_local_count = static_cast<uint32_t>(local_type_vec_.size());
     return true;
   }
 
@@ -448,15 +483,12 @@
   Zone* zone_;
   TFBuilder* builder_;
   const byte* base_;
-  TreeResult result_;
 
   SsaEnv* ssa_env_;
 
-  ZoneVector<LocalType> local_type_vec_;
-  ZoneVector<Tree*> trees_;
-  ZoneVector<Production> stack_;
-  ZoneVector<Block> blocks_;
-  ZoneVector<IfEnv> ifs_;
+  ZoneVector<LocalType> local_type_vec_;  // types of local variables.
+  ZoneVector<Value> stack_;               // stack of values.
+  ZoneVector<Control> control_;           // stack of blocks, loops, and ifs.
 
   inline bool build() { return builder_ && ssa_env_->go(); }
 
@@ -508,53 +540,6 @@
     }
   }
 
-  void Leaf(LocalType type, TFNode* node = nullptr) {
-    size_t size = sizeof(Tree);
-    Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
-    tree->type = type;
-    tree->count = 0;
-    tree->pc = pc_;
-    tree->node = node;
-    tree->children[0] = nullptr;
-    Reduce(tree);
-  }
-
-  void Shift(LocalType type, uint32_t count) {
-    size_t size =
-        sizeof(Tree) + (count == 0 ? 0 : ((count - 1) * sizeof(Tree*)));
-    Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
-    tree->type = type;
-    tree->count = count;
-    tree->pc = pc_;
-    tree->node = nullptr;
-    for (uint32_t i = 0; i < count; i++) tree->children[i] = nullptr;
-    if (count == 0) {
-      Production p = {tree, 0};
-      Reduce(&p);
-      Reduce(tree);
-    } else {
-      stack_.push_back({tree, 0});
-    }
-  }
-
-  void Reduce(Tree* tree) {
-    while (true) {
-      if (stack_.size() == 0) {
-        trees_.push_back(tree);
-        break;
-      }
-      Production* p = &stack_.back();
-      p->tree->children[p->index++] = tree;
-      Reduce(p);
-      if (p->done()) {
-        tree = p->tree;
-        stack_.pop_back();
-      } else {
-        break;
-      }
-    }
-  }
-
   char* indentation() {
     static const int kMaxIndent = 64;
     static char bytes[kMaxIndent + 1];
@@ -605,11 +590,11 @@
     total_locals_ = local_type_vec_.size();
   }
 
-  // Decodes the body of a function, producing reduced trees into {result}.
+  // Decodes the body of a function.
   void DecodeFunctionBody() {
-    TRACE("wasm-decode %p...%p (%d bytes) %s\n",
+    TRACE("wasm-decode %p...%p (module+%d, %d bytes) %s\n",
           reinterpret_cast<const void*>(start_),
-          reinterpret_cast<const void*>(limit_),
+          reinterpret_cast<const void*>(limit_), baserel(pc_),
           static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
 
     if (pc_ >= limit_) return;  // Nothing to do.
@@ -617,49 +602,45 @@
     while (true) {  // decoding loop.
       int len = 1;
       WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
-      TRACE("wasm-decode module+%-6d %s func+%d: 0x%02x %s\n", baserel(pc_),
-            indentation(), startrel(pc_), opcode,
-            WasmOpcodes::OpcodeName(opcode));
+      TRACE("  @%-6d #%02x:%-20s|", startrel(pc_), opcode,
+            WasmOpcodes::ShortOpcodeName(opcode));
 
       FunctionSig* sig = WasmOpcodes::Signature(opcode);
       if (sig) {
-        // A simple expression with a fixed signature.
-        Shift(sig->GetReturn(), static_cast<uint32_t>(sig->parameter_count()));
-        pc_ += len;
-        if (pc_ >= limit_) {
-          // End of code reached or exceeded.
-          if (pc_ > limit_ && ok()) {
-            error("Beyond end of code");
+        // Fast case of a simple operator.
+        TFNode* node;
+        switch (sig->parameter_count()) {
+          case 1: {
+            Value val = Pop(0, sig->GetParam(0));
+            node = BUILD(Unop, opcode, val.node, position());
+            break;
           }
-          return;
+          case 2: {
+            Value rval = Pop(1, sig->GetParam(1));
+            Value lval = Pop(0, sig->GetParam(0));
+            node = BUILD(Binop, opcode, lval.node, rval.node, position());
+            break;
+          }
+          default:
+            UNREACHABLE();
+            node = nullptr;
+            break;
         }
-        continue;  // back to decoding loop.
-      }
-
-      switch (opcode) {
-        case kExprNop:
-          Leaf(kAstStmt);
-          break;
-        case kExprBlock: {
-          BlockCountOperand operand(this, pc_);
-          if (operand.count < 1) {
-            Leaf(kAstStmt);
-          } else {
-            Shift(kAstEnd, operand.count);
+        Push(GetReturnType(sig), node);
+      } else {
+        // Complex bytecode.
+        switch (opcode) {
+          case kExprNop:
+            Push(kAstStmt, nullptr);
+            break;
+          case kExprBlock: {
             // The break environment is the outer environment.
             SsaEnv* break_env = ssa_env_;
             PushBlock(break_env);
             SetEnv("block:start", Steal(break_env));
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprLoop: {
-          BlockCountOperand operand(this, pc_);
-          if (operand.count < 1) {
-            Leaf(kAstStmt);
-          } else {
-            Shift(kAstEnd, operand.count);
+          case kExprLoop: {
             // The break environment is the outer environment.
             SsaEnv* break_env = ssa_env_;
             PushBlock(break_env);
@@ -667,268 +648,535 @@
             // The continue environment is the inner environment.
             PrepareForLoop(pc_, cont_env);
             SetEnv("loop:start", Split(cont_env));
-            if (ssa_env_->go()) ssa_env_->state = SsaEnv::kReached;
-            PushBlock(cont_env);
-            blocks_.back().stack_depth = -1;  // no production for inner block.
+            ssa_env_->SetNotMerged();
+            PushLoop(cont_env);
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprIf:
-          Shift(kAstStmt, 2);
-          break;
-        case kExprIfElse:
-          Shift(kAstEnd, 3);  // Result type is typeof(x) in {c ? x : y}.
-          break;
-        case kExprSelect:
-          Shift(kAstStmt, 3);  // Result type is typeof(x) in {c ? x : y}.
-          break;
-        case kExprBr: {
-          BreakDepthOperand operand(this, pc_);
-          if (Validate(pc_, operand, blocks_)) {
-            Shift(kAstEnd, 1);
+          case kExprIf: {
+            // Condition on top of stack. Split environments for branches.
+            Value cond = Pop(0, kAstI32);
+            TFNode* if_true = nullptr;
+            TFNode* if_false = nullptr;
+            BUILD(Branch, cond.node, &if_true, &if_false);
+            SsaEnv* end_env = ssa_env_;
+            SsaEnv* false_env = Split(ssa_env_);
+            false_env->control = if_false;
+            SsaEnv* true_env = Steal(ssa_env_);
+            true_env->control = if_true;
+            PushIf(end_env, false_env);
+            SetEnv("if:true", true_env);
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprBrIf: {
-          BreakDepthOperand operand(this, pc_);
-          if (Validate(pc_, operand, blocks_)) {
-            Shift(kAstStmt, 2);
+          case kExprElse: {
+            if (control_.empty()) {
+              error(pc_, "else does not match any if");
+              break;
+            }
+            Control* c = &control_.back();
+            if (!c->is_if()) {
+              error(pc_, c->pc, "else does not match an if");
+              break;
+            }
+            if (c->false_env == nullptr) {
+              error(pc_, c->pc, "else already present for if");
+              break;
+            }
+            Value val = PopUpTo(c->stack_depth);
+            MergeInto(c->end_env, &c->node, &c->type, val);
+            // Switch to environment for false branch.
+            SetEnv("if_else:false", c->false_env);
+            c->false_env = nullptr;  // record that an else is already seen
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprBrTable: {
-          BranchTableOperand operand(this, pc_);
-          if (Validate(pc_, operand, blocks_.size())) {
-            Shift(kAstEnd, 1);
+          case kExprEnd: {
+            if (control_.empty()) {
+              error(pc_, "end does not match any if or block");
+              break;
+            }
+            const char* name = "block:end";
+            Control* c = &control_.back();
+            if (c->is_loop) {
+              // Loops always push control in pairs.
+              control_.pop_back();
+              c = &control_.back();
+              name = "loop:end";
+            }
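+            // Any value left on the stack above the construct's depth is its
+            // fallthrough result.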
+            Value val = PopUpTo(c->stack_depth);
+            if (c->is_if()) {
+              if (c->false_env != nullptr) {
+                // End the true branch of a one-armed if.
+                Goto(c->false_env, c->end_env);
+                val = {val.pc, nullptr, kAstStmt};
+                name = "if:merge";
+              } else {
+                // End the false branch of a two-armed if.
+                name = "if_else:merge";
+              }
+            }
+            if (ssa_env_->go()) {
+              MergeInto(c->end_env, &c->node, &c->type, val);
+            }
+            SetEnv(name, c->end_env);
+            stack_.resize(c->stack_depth);
+            Push(c->type, c->node);
+            control_.pop_back();
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprReturn: {
-          int count = static_cast<int>(sig_->return_count());
-          if (count == 0) {
-            BUILD(Return, 0, builder_->Buffer(0));
-            ssa_env_->Kill();
-            Leaf(kAstEnd);
-          } else {
-            Shift(kAstEnd, count);
+          case kExprSelect: {
+            Value cond = Pop(2, kAstI32);
+            Value fval = Pop();
+            Value tval = Pop();
+            if (tval.type == kAstStmt || tval.type != fval.type) {
+              if (tval.type != kAstEnd && fval.type != kAstEnd) {
+                error(pc_, "type mismatch in select");
+                break;
+              }
+            }
+            if (build()) {
+              DCHECK(tval.type != kAstEnd);
+              DCHECK(fval.type != kAstEnd);
+              DCHECK(cond.type != kAstEnd);
+              TFNode* controls[2];
+              builder_->Branch(cond.node, &controls[0], &controls[1]);
+              TFNode* merge = builder_->Merge(2, controls);
+              TFNode* vals[2] = {tval.node, fval.node};
+              TFNode* phi = builder_->Phi(tval.type, 2, vals, merge);
+              Push(tval.type, phi);
+              ssa_env_->control = merge;
+            } else {
+              Push(tval.type, nullptr);
+            }
+            break;
           }
-          break;
-        }
-        case kExprUnreachable: {
-          BUILD0(Unreachable);
-          ssa_env_->Kill(SsaEnv::kControlEnd);
-          Leaf(kAstEnd, nullptr);
-          break;
-        }
-        case kExprI8Const: {
-          ImmI8Operand operand(this, pc_);
-          Leaf(kAstI32, BUILD(Int32Constant, operand.value));
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprI32Const: {
-          ImmI32Operand operand(this, pc_);
-          Leaf(kAstI32, BUILD(Int32Constant, operand.value));
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprI64Const: {
-          ImmI64Operand operand(this, pc_);
-          Leaf(kAstI64, BUILD(Int64Constant, operand.value));
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprF32Const: {
-          ImmF32Operand operand(this, pc_);
-          Leaf(kAstF32, BUILD(Float32Constant, operand.value));
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprF64Const: {
-          ImmF64Operand operand(this, pc_);
-          Leaf(kAstF64, BUILD(Float64Constant, operand.value));
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprGetLocal: {
-          LocalIndexOperand operand(this, pc_);
-          if (Validate(pc_, operand)) {
-            TFNode* val = build() ? ssa_env_->locals[operand.index] : nullptr;
-            Leaf(operand.type, val);
+          case kExprBr: {
+            BreakDepthOperand operand(this, pc_);
+            Value val = {pc_, nullptr, kAstStmt};
+            if (operand.arity) val = Pop();
+            if (Validate(pc_, operand, control_)) {
+              BreakTo(operand.target, val);
+            }
+            len = 1 + operand.length;
+            Push(kAstEnd, nullptr);
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprSetLocal: {
-          LocalIndexOperand operand(this, pc_);
-          if (Validate(pc_, operand)) {
-            Shift(operand.type, 1);
+          case kExprBrIf: {
+            BreakDepthOperand operand(this, pc_);
+            Value cond = Pop(operand.arity, kAstI32);
+            Value val = {pc_, nullptr, kAstStmt};
+            if (operand.arity == 1) val = Pop();
+            if (Validate(pc_, operand, control_)) {
+              SsaEnv* fenv = ssa_env_;
+              SsaEnv* tenv = Split(fenv);
+              fenv->SetNotMerged();
+              BUILD(Branch, cond.node, &tenv->control, &fenv->control);
+              ssa_env_ = tenv;
+              BreakTo(operand.target, val);
+              ssa_env_ = fenv;
+            }
+            len = 1 + operand.length;
+            Push(kAstStmt, nullptr);
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprLoadGlobal: {
-          GlobalIndexOperand operand(this, pc_);
-          if (Validate(pc_, operand)) {
-            Leaf(operand.type, BUILD(LoadGlobal, operand.index));
+          case kExprBrTable: {
+            BranchTableOperand operand(this, pc_);
+            if (Validate(pc_, operand, control_.size())) {
+              Value key = Pop(operand.arity, kAstI32);
+              Value val = {pc_, nullptr, kAstStmt};
+              if (operand.arity == 1) val = Pop();
+              if (failed()) break;
+
+              SsaEnv* break_env = ssa_env_;
+              if (operand.table_count > 0) {
+                // Build branches to the various blocks based on the table.
+                TFNode* sw = BUILD(Switch, operand.table_count + 1, key.node);
+
+                SsaEnv* copy = Steal(break_env);
+                ssa_env_ = copy;
+                for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+                  uint16_t target = operand.read_entry(this, i);
+                  ssa_env_ = Split(copy);
+                  ssa_env_->control = (i == operand.table_count)
+                                          ? BUILD(IfDefault, sw)
+                                          : BUILD(IfValue, i, sw);
+                  int depth = target;
+                  Control* c = &control_[control_.size() - depth - 1];
+                  MergeInto(c->end_env, &c->node, &c->type, val);
+                }
+              } else {
+                // Only a default target. Do the equivalent of br.
+                uint16_t target = operand.read_entry(this, 0);
+                int depth = target;
+                Control* c = &control_[control_.size() - depth - 1];
+                MergeInto(c->end_env, &c->node, &c->type, val);
+              }
+              // br_table ends the control flow like br.
+              ssa_env_ = break_env;
+              Push(kAstStmt, nullptr);
+            }
+            len = 1 + operand.length;
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprStoreGlobal: {
-          GlobalIndexOperand operand(this, pc_);
-          if (Validate(pc_, operand)) {
-            Shift(operand.type, 1);
+          case kExprReturn: {
+            ReturnArityOperand operand(this, pc_);
+            if (operand.arity != sig_->return_count()) {
+              error(pc_, pc_ + 1, "arity mismatch in return");
+            }
+            DoReturn();
+            len = 1 + operand.length;
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprI32LoadMem8S:
-        case kExprI32LoadMem8U:
-        case kExprI32LoadMem16S:
-        case kExprI32LoadMem16U:
-        case kExprI32LoadMem:
-          len = DecodeLoadMem(pc_, kAstI32);
-          break;
-        case kExprI64LoadMem8S:
-        case kExprI64LoadMem8U:
-        case kExprI64LoadMem16S:
-        case kExprI64LoadMem16U:
-        case kExprI64LoadMem32S:
-        case kExprI64LoadMem32U:
-        case kExprI64LoadMem:
-          len = DecodeLoadMem(pc_, kAstI64);
-          break;
-        case kExprF32LoadMem:
-          len = DecodeLoadMem(pc_, kAstF32);
-          break;
-        case kExprF64LoadMem:
-          len = DecodeLoadMem(pc_, kAstF64);
-          break;
-        case kExprI32StoreMem8:
-        case kExprI32StoreMem16:
-        case kExprI32StoreMem:
-          len = DecodeStoreMem(pc_, kAstI32);
-          break;
-        case kExprI64StoreMem8:
-        case kExprI64StoreMem16:
-        case kExprI64StoreMem32:
-        case kExprI64StoreMem:
-          len = DecodeStoreMem(pc_, kAstI64);
-          break;
-        case kExprF32StoreMem:
-          len = DecodeStoreMem(pc_, kAstF32);
-          break;
-        case kExprF64StoreMem:
-          len = DecodeStoreMem(pc_, kAstF64);
-          break;
-        case kExprMemorySize:
-          Leaf(kAstI32, BUILD(MemSize, 0));
-          break;
-        case kExprGrowMemory:
-          Shift(kAstI32, 1);
-          break;
-        case kExprCallFunction: {
-          FunctionIndexOperand operand(this, pc_);
-          if (Validate(pc_, operand)) {
-            LocalType type = operand.sig->return_count() == 0
-                                 ? kAstStmt
-                                 : operand.sig->GetReturn();
-            Shift(type, static_cast<int>(operand.sig->parameter_count()));
+          case kExprUnreachable: {
+            Push(kAstEnd, BUILD(Unreachable, position()));
+            ssa_env_->Kill(SsaEnv::kControlEnd);
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprCallIndirect: {
-          SignatureIndexOperand operand(this, pc_);
-          if (Validate(pc_, operand)) {
-            LocalType type = operand.sig->return_count() == 0
-                                 ? kAstStmt
-                                 : operand.sig->GetReturn();
-            Shift(type, static_cast<int>(1 + operand.sig->parameter_count()));
+          case kExprI8Const: {
+            ImmI8Operand operand(this, pc_);
+            Push(kAstI32, BUILD(Int32Constant, operand.value));
+            len = 1 + operand.length;
+            break;
           }
-          len = 1 + operand.length;
-          break;
-        }
-        case kExprCallImport: {
-          ImportIndexOperand operand(this, pc_);
-          if (Validate(pc_, operand)) {
-            LocalType type = operand.sig->return_count() == 0
-                                 ? kAstStmt
-                                 : operand.sig->GetReturn();
-            Shift(type, static_cast<int>(operand.sig->parameter_count()));
+          case kExprI32Const: {
+            ImmI32Operand operand(this, pc_);
+            Push(kAstI32, BUILD(Int32Constant, operand.value));
+            len = 1 + operand.length;
+            break;
           }
-          len = 1 + operand.length;
-          break;
+          case kExprI64Const: {
+            ImmI64Operand operand(this, pc_);
+            Push(kAstI64, BUILD(Int64Constant, operand.value));
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprF32Const: {
+            ImmF32Operand operand(this, pc_);
+            Push(kAstF32, BUILD(Float32Constant, operand.value));
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprF64Const: {
+            ImmF64Operand operand(this, pc_);
+            Push(kAstF64, BUILD(Float64Constant, operand.value));
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprGetLocal: {
+            LocalIndexOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              if (build()) {
+                Push(operand.type, ssa_env_->locals[operand.index]);
+              } else {
+                Push(operand.type, nullptr);
+              }
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprSetLocal: {
+            LocalIndexOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              Value val = Pop(0, local_type_vec_[operand.index]);
+              if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
+              Push(val.type, val.node);
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprLoadGlobal: {
+            GlobalIndexOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              Push(operand.type, BUILD(LoadGlobal, operand.index));
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprStoreGlobal: {
+            GlobalIndexOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              Value val = Pop(0, operand.type);
+              BUILD(StoreGlobal, operand.index, val.node);
+              Push(val.type, val.node);
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprI32LoadMem8S:
+            len = DecodeLoadMem(kAstI32, MachineType::Int8());
+            break;
+          case kExprI32LoadMem8U:
+            len = DecodeLoadMem(kAstI32, MachineType::Uint8());
+            break;
+          case kExprI32LoadMem16S:
+            len = DecodeLoadMem(kAstI32, MachineType::Int16());
+            break;
+          case kExprI32LoadMem16U:
+            len = DecodeLoadMem(kAstI32, MachineType::Uint16());
+            break;
+          case kExprI32LoadMem:
+            len = DecodeLoadMem(kAstI32, MachineType::Int32());
+            break;
+
+          case kExprI64LoadMem8S:
+            len = DecodeLoadMem(kAstI64, MachineType::Int8());
+            break;
+          case kExprI64LoadMem8U:
+            len = DecodeLoadMem(kAstI64, MachineType::Uint8());
+            break;
+          case kExprI64LoadMem16S:
+            len = DecodeLoadMem(kAstI64, MachineType::Int16());
+            break;
+          case kExprI64LoadMem16U:
+            len = DecodeLoadMem(kAstI64, MachineType::Uint16());
+            break;
+          case kExprI64LoadMem32S:
+            len = DecodeLoadMem(kAstI64, MachineType::Int32());
+            break;
+          case kExprI64LoadMem32U:
+            len = DecodeLoadMem(kAstI64, MachineType::Uint32());
+            break;
+          case kExprI64LoadMem:
+            len = DecodeLoadMem(kAstI64, MachineType::Int64());
+            break;
+          case kExprF32LoadMem:
+            len = DecodeLoadMem(kAstF32, MachineType::Float32());
+            break;
+          case kExprF64LoadMem:
+            len = DecodeLoadMem(kAstF64, MachineType::Float64());
+            break;
+          case kExprI32StoreMem8:
+            len = DecodeStoreMem(kAstI32, MachineType::Int8());
+            break;
+          case kExprI32StoreMem16:
+            len = DecodeStoreMem(kAstI32, MachineType::Int16());
+            break;
+          case kExprI32StoreMem:
+            len = DecodeStoreMem(kAstI32, MachineType::Int32());
+            break;
+          case kExprI64StoreMem8:
+            len = DecodeStoreMem(kAstI64, MachineType::Int8());
+            break;
+          case kExprI64StoreMem16:
+            len = DecodeStoreMem(kAstI64, MachineType::Int16());
+            break;
+          case kExprI64StoreMem32:
+            len = DecodeStoreMem(kAstI64, MachineType::Int32());
+            break;
+          case kExprI64StoreMem:
+            len = DecodeStoreMem(kAstI64, MachineType::Int64());
+            break;
+          case kExprF32StoreMem:
+            len = DecodeStoreMem(kAstF32, MachineType::Float32());
+            break;
+          case kExprF64StoreMem:
+            len = DecodeStoreMem(kAstF64, MachineType::Float64());
+            break;
+
+          case kExprMemorySize:
+            Push(kAstI32, BUILD(MemSize, 0));
+            break;
+          case kExprGrowMemory: {
+            Value val = Pop(0, kAstI32);
+            USE(val);  // TODO(titzer): build node for grow memory
+            Push(kAstI32, BUILD(Int32Constant, 0));
+            break;
+          }
+          case kExprCallFunction: {
+            CallFunctionOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              TFNode** buffer = PopArgs(operand.sig);
+              TFNode* call =
+                  BUILD(CallDirect, operand.index, buffer, position());
+              Push(GetReturnType(operand.sig), call);
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprCallIndirect: {
+            CallIndirectOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              TFNode** buffer = PopArgs(operand.sig);
+              Value index = Pop(0, kAstI32);
+              if (buffer) buffer[0] = index.node;
+              TFNode* call =
+                  BUILD(CallIndirect, operand.index, buffer, position());
+              Push(GetReturnType(operand.sig), call);
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          case kExprCallImport: {
+            CallImportOperand operand(this, pc_);
+            if (Validate(pc_, operand)) {
+              TFNode** buffer = PopArgs(operand.sig);
+              TFNode* call =
+                  BUILD(CallImport, operand.index, buffer, position());
+              Push(GetReturnType(operand.sig), call);
+            }
+            len = 1 + operand.length;
+            break;
+          }
+          default:
+            error("Invalid opcode");
+            return;
         }
-        case kExprDeclLocals:
-        default:
-          error("Invalid opcode");
-          return;
+      }  // end complex bytecode
+
+#if DEBUG
+      if (FLAG_trace_wasm_decoder) {
+        for (size_t i = 0; i < stack_.size(); i++) {
+          Value& val = stack_[i];
+          WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
+          PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
+                 static_cast<int>(val.pc - start_),
+                 WasmOpcodes::ShortOpcodeName(opcode));
+          switch (opcode) {
+            case kExprI32Const: {
+              ImmI32Operand operand(this, val.pc);
+              PrintF("[%d]", operand.value);
+              break;
+            }
+            case kExprGetLocal: {
+              LocalIndexOperand operand(this, val.pc);
+              PrintF("[%u]", operand.index);
+              break;
+            }
+            case kExprSetLocal: {
+              LocalIndexOperand operand(this, val.pc);
+              PrintF("[%u]", operand.index);
+              break;
+            }
+            default:
+              break;
+          }
+        }
+        PrintF("\n");
       }
+#endif
       pc_ += len;
       if (pc_ >= limit_) {
         // End of code reached or exceeded.
-        if (pc_ > limit_ && ok()) {
-          error("Beyond end of code");
-        }
+        if (pc_ > limit_ && ok()) error("Beyond end of code");
         return;
       }
+    }  // end decode loop
+  }    // end DecodeFunctionBody()
+
+  TFNode** PopArgs(FunctionSig* sig) {
+    if (build()) {
+      int count = static_cast<int>(sig->parameter_count());
+      TFNode** buffer = builder_->Buffer(count + 1);
+      buffer[0] = nullptr;  // reserved for code object or function index.
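+      // Arguments were pushed left-to-right, so pop them in reverse order.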
+      for (int i = count - 1; i >= 0; i--) {
+        buffer[i + 1] = Pop(i, sig->GetParam(i)).node;
+      }
+      return buffer;
+    } else {
+      int count = static_cast<int>(sig->parameter_count());
+      for (int i = count - 1; i >= 0; i--) {
+        Pop(i, sig->GetParam(i));
+      }
+      return nullptr;
     }
   }
 
-  void PushBlock(SsaEnv* ssa_env) {
-    blocks_.push_back({ssa_env, static_cast<int>(stack_.size() - 1)});
+  LocalType GetReturnType(FunctionSig* sig) {
+    return sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
   }
 
-  int DecodeLoadMem(const byte* pc, LocalType type) {
-    MemoryAccessOperand operand(this, pc);
-    Shift(type, 1);
+  void PushBlock(SsaEnv* end_env) {
+    int stack_depth = static_cast<int>(stack_.size());
+    control_.push_back(
+        {pc_, stack_depth, end_env, nullptr, nullptr, kAstEnd, false});
+  }
+
+  void PushLoop(SsaEnv* end_env) {
+    int stack_depth = static_cast<int>(stack_.size());
+    control_.push_back(
+        {pc_, stack_depth, end_env, nullptr, nullptr, kAstEnd, true});
+  }
+
+  void PushIf(SsaEnv* end_env, SsaEnv* false_env) {
+    int stack_depth = static_cast<int>(stack_.size());
+    control_.push_back(
+        {pc_, stack_depth, end_env, false_env, nullptr, kAstStmt, false});
+  }
+
+  int DecodeLoadMem(LocalType type, MachineType mem_type) {
+    MemoryAccessOperand operand(this, pc_);
+    Value index = Pop(0, kAstI32);
+    TFNode* node =
+        BUILD(LoadMem, type, mem_type, index.node, operand.offset, position());
+    Push(type, node);
     return 1 + operand.length;
   }
 
-  int DecodeStoreMem(const byte* pc, LocalType type) {
-    MemoryAccessOperand operand(this, pc);
-    Shift(type, 2);
+  int DecodeStoreMem(LocalType type, MachineType mem_type) {
+    MemoryAccessOperand operand(this, pc_);
+    Value val = Pop(1, type);
+    Value index = Pop(0, kAstI32);
+    BUILD(StoreMem, mem_type, index.node, operand.offset, val.node, position());
+    Push(type, val.node);
     return 1 + operand.length;
   }
 
-  void AddImplicitReturnAtEnd() {
-    int retcount = static_cast<int>(sig_->return_count());
-    if (retcount == 0) {
-      BUILD0(ReturnVoid);
-      return;
+  void DoReturn() {
+    int count = static_cast<int>(sig_->return_count());
+    TFNode** buffer = nullptr;
+    if (build()) buffer = builder_->Buffer(count);
+
+    // Pop return values off the stack in reverse order.
+    for (int i = count - 1; i >= 0; i--) {
+      Value val = Pop(i, sig_->GetReturn(i));
+      if (buffer) buffer[i] = val.node;
     }
 
-    if (static_cast<int>(trees_.size()) < retcount) {
-      error(limit_, nullptr,
-            "ImplicitReturn expects %d arguments, only %d remain", retcount,
-            static_cast<int>(trees_.size()));
-      return;
-    }
+    Push(kAstEnd, BUILD(Return, count, buffer));
+    ssa_env_->Kill(SsaEnv::kControlEnd);
+  }
 
-    TRACE("wasm-decode implicit return of %d args\n", retcount);
+  void Push(LocalType type, TFNode* node) {
+    stack_.push_back({pc_, node, type});
+  }
 
-    TFNode** buffer = BUILD(Buffer, retcount);
-    for (int index = 0; index < retcount; index++) {
-      Tree* tree = trees_[trees_.size() - 1 - index];
-      if (buffer) buffer[index] = tree->node;
-      LocalType expected = sig_->GetReturn(index);
-      if (tree->type != expected) {
-        error(limit_, tree->pc,
-              "ImplicitReturn[%d] expected type %s, found %s of type %s", index,
-              WasmOpcodes::TypeName(expected),
-              WasmOpcodes::OpcodeName(tree->opcode()),
-              WasmOpcodes::TypeName(tree->type));
-        return;
+  const char* SafeOpcodeNameAt(const byte* pc) {
+    if (pc >= end_) return "<end>";
+    return WasmOpcodes::ShortOpcodeName(static_cast<WasmOpcode>(*pc));
+  }
+
+  Value Pop(int index, LocalType expected) {
+    Value val = Pop();
+    if (val.type != expected) {
+      if (val.type != kAstEnd) {
+        error(pc_, val.pc, "%s[%d] expected type %s, found %s of type %s",
+              SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
+              SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
       }
     }
+    return val;
+  }
 
-    BUILD(Return, retcount, buffer);
+  Value Pop() {
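+    // Never pop a value that belongs to an enclosing control construct.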
+    size_t limit = control_.empty() ? 0 : control_.back().stack_depth;
+    if (stack_.size() <= limit) {
+      Value val = {pc_, nullptr, kAstStmt};
+      error(pc_, pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
+      return val;
+    }
+    Value val = stack_.back();
+    stack_.pop_back();
+    return val;
+  }
+
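+  // Pops the top value if the stack has grown beyond {stack_depth} and
+  // shrinks the stack back down to {stack_depth}.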
+  Value PopUpTo(int stack_depth) {
+    if (stack_depth == static_cast<int>(stack_.size())) {
+      Value val = {pc_, nullptr, kAstStmt};
+      return val;
+    } else {
+      DCHECK_LE(stack_depth, static_cast<int>(stack_.size()));
+      Value val = Pop();
+      stack_.resize(stack_depth);
+      return val;
+    }
   }
 
   int baserel(const byte* ptr) {
@@ -937,437 +1185,65 @@
 
   int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
 
-  void Reduce(Production* p) {
-    WasmOpcode opcode = p->opcode();
-    TRACE("-----reduce module+%-6d %s func+%d: 0x%02x %s\n", baserel(p->pc()),
-          indentation(), startrel(p->pc()), opcode,
-          WasmOpcodes::OpcodeName(opcode));
-    FunctionSig* sig = WasmOpcodes::Signature(opcode);
-    if (sig) {
-      // A simple expression with a fixed signature.
-      TypeCheckLast(p, sig->GetParam(p->index - 1));
-      if (p->done() && build()) {
-        if (sig->parameter_count() == 2) {
-          p->tree->node = builder_->Binop(opcode, p->tree->children[0]->node,
-                                          p->tree->children[1]->node);
-        } else if (sig->parameter_count() == 1) {
-          p->tree->node = builder_->Unop(opcode, p->tree->children[0]->node);
-        } else {
-          UNREACHABLE();
-        }
-      }
-      return;
-    }
-
-    switch (opcode) {
-      case kExprBlock: {
-        if (p->done()) {
-          Block* last = &blocks_.back();
-          DCHECK_EQ(stack_.size() - 1, last->stack_depth);
-          // fallthrough with the last expression.
-          ReduceBreakToExprBlock(p, last);
-          SetEnv("block:end", last->ssa_env);
-          blocks_.pop_back();
-        }
-        break;
-      }
-      case kExprLoop: {
-        if (p->done()) {
-          // Pop the continue environment.
-          blocks_.pop_back();
-          // Get the break environment.
-          Block* last = &blocks_.back();
-          DCHECK_EQ(stack_.size() - 1, last->stack_depth);
-          // fallthrough with the last expression.
-          ReduceBreakToExprBlock(p, last);
-          SetEnv("loop:end", last->ssa_env);
-          blocks_.pop_back();
-        }
-        break;
-      }
-      case kExprIf: {
-        if (p->index == 1) {
-          // Condition done. Split environment for true branch.
-          TypeCheckLast(p, kAstI32);
-          SsaEnv* false_env = ssa_env_;
-          SsaEnv* true_env = Split(ssa_env_);
-          ifs_.push_back({nullptr, false_env, nullptr});
-          BUILD(Branch, p->last()->node, &true_env->control,
-                &false_env->control);
-          SetEnv("if:true", true_env);
-        } else if (p->index == 2) {
-          // True block done. Merge true and false environments.
-          IfEnv* env = &ifs_.back();
-          SsaEnv* merge = env->merge_env;
-          if (merge->go()) {
-            merge->state = SsaEnv::kReached;
-            Goto(ssa_env_, merge);
-          }
-          SetEnv("if:merge", merge);
-          ifs_.pop_back();
-        }
-        break;
-      }
-      case kExprIfElse: {
-        if (p->index == 1) {
-          // Condition done. Split environment for true and false branches.
-          TypeCheckLast(p, kAstI32);
-          SsaEnv* merge_env = ssa_env_;
-          TFNode* if_true = nullptr;
-          TFNode* if_false = nullptr;
-          BUILD(Branch, p->last()->node, &if_true, &if_false);
-          SsaEnv* false_env = Split(ssa_env_);
-          SsaEnv* true_env = Steal(ssa_env_);
-          false_env->control = if_false;
-          true_env->control = if_true;
-          ifs_.push_back({false_env, merge_env, nullptr});
-          SetEnv("if_else:true", true_env);
-        } else if (p->index == 2) {
-          // True expr done.
-          IfEnv* env = &ifs_.back();
-          MergeIntoProduction(p, env->merge_env, p->last());
-          // Switch to environment for false branch.
-          SsaEnv* false_env = ifs_.back().false_env;
-          SetEnv("if_else:false", false_env);
-        } else if (p->index == 3) {
-          // False expr done.
-          IfEnv* env = &ifs_.back();
-          MergeIntoProduction(p, env->merge_env, p->last());
-          SetEnv("if_else:merge", env->merge_env);
-          ifs_.pop_back();
-        }
-        break;
-      }
-      case kExprSelect: {
-        if (p->index == 1) {
-          // True expression done.
-          p->tree->type = p->last()->type;
-          if (p->tree->type == kAstStmt) {
-            error(p->pc(), p->tree->children[1]->pc,
-                  "select operand should be expression");
-          }
-        } else if (p->index == 2) {
-          // False expression done.
-          TypeCheckLast(p, p->tree->type);
-        } else {
-          // Condition done.
-          DCHECK(p->done());
-          TypeCheckLast(p, kAstI32);
-          if (build()) {
-            TFNode* controls[2];
-            builder_->Branch(p->tree->children[2]->node, &controls[0],
-                             &controls[1]);
-            TFNode* merge = builder_->Merge(2, controls);
-            TFNode* vals[2] = {p->tree->children[0]->node,
-                               p->tree->children[1]->node};
-            TFNode* phi = builder_->Phi(p->tree->type, 2, vals, merge);
-            p->tree->node = phi;
-            ssa_env_->control = merge;
-          }
-        }
-        break;
-      }
-      case kExprBr: {
-        BreakDepthOperand operand(this, p->pc());
-        CHECK(Validate(p->pc(), operand, blocks_));
-        ReduceBreakToExprBlock(p, operand.target);
-        break;
-      }
-      case kExprBrIf: {
-        if (p->done()) {
-          TypeCheckLast(p, kAstI32);
-          BreakDepthOperand operand(this, p->pc());
-          CHECK(Validate(p->pc(), operand, blocks_));
-          SsaEnv* fenv = ssa_env_;
-          SsaEnv* tenv = Split(fenv);
-          BUILD(Branch, p->tree->children[1]->node, &tenv->control,
-                &fenv->control);
-          ssa_env_ = tenv;
-          ReduceBreakToExprBlock(p, operand.target, p->tree->children[0]);
-          ssa_env_ = fenv;
-        }
-        break;
-      }
-      case kExprBrTable: {
-        if (p->index == 1) {
-          // Switch key finished.
-          TypeCheckLast(p, kAstI32);
-          if (failed()) break;
-
-          BranchTableOperand operand(this, p->pc());
-          DCHECK(Validate(p->pc(), operand, blocks_.size()));
-
-          // Build a switch only if it has more than just a default target.
-          bool build_switch = operand.table_count > 0;
-          TFNode* sw = nullptr;
-          if (build_switch) {
-            sw = BUILD(Switch, operand.table_count + 1, p->last()->node);
-          }
-
-          // Process the targets of the break table.
-          SsaEnv* prev = ssa_env_;
-          SsaEnv* copy = Steal(prev);
-          for (uint32_t i = 0; i < operand.table_count + 1; i++) {
-            uint32_t target = operand.read_entry(this, i);
-            SsaEnv* env = copy;
-            if (build_switch) {
-              ssa_env_ = env = Split(env);
-              env->control = i == operand.table_count ? BUILD(IfDefault, sw)
-                                                      : BUILD(IfValue, i, sw);
-            }
-            SsaEnv* tenv = blocks_[blocks_.size() - target - 1].ssa_env;
-            Goto(env, tenv);
-          }
-          ssa_env_ = prev;
-        }
-        break;
-      }
-      case kExprReturn: {
-        TypeCheckLast(p, sig_->GetReturn(p->index - 1));
-        if (p->done()) {
-          if (build()) {
-            int count = p->tree->count;
-            TFNode** buffer = builder_->Buffer(count);
-            for (int i = 0; i < count; i++) {
-              buffer[i] = p->tree->children[i]->node;
-            }
-            BUILD(Return, count, buffer);
-          }
-          ssa_env_->Kill(SsaEnv::kControlEnd);
-        }
-        break;
-      }
-      case kExprSetLocal: {
-        LocalIndexOperand operand(this, p->pc());
-        CHECK(Validate(p->pc(), operand));
-        Tree* val = p->last();
-        if (operand.type == val->type) {
-          if (build()) ssa_env_->locals[operand.index] = val->node;
-          p->tree->node = val->node;
-        } else {
-          error(p->pc(), val->pc, "Typecheck failed in SetLocal");
-        }
-        break;
-      }
-      case kExprStoreGlobal: {
-        GlobalIndexOperand operand(this, p->pc());
-        CHECK(Validate(p->pc(), operand));
-        Tree* val = p->last();
-        if (operand.type == val->type) {
-          BUILD(StoreGlobal, operand.index, val->node);
-          p->tree->node = val->node;
-        } else {
-          error(p->pc(), val->pc, "Typecheck failed in StoreGlobal");
-        }
-        break;
-      }
-
-      case kExprI32LoadMem8S:
-        return ReduceLoadMem(p, kAstI32, MachineType::Int8());
-      case kExprI32LoadMem8U:
-        return ReduceLoadMem(p, kAstI32, MachineType::Uint8());
-      case kExprI32LoadMem16S:
-        return ReduceLoadMem(p, kAstI32, MachineType::Int16());
-      case kExprI32LoadMem16U:
-        return ReduceLoadMem(p, kAstI32, MachineType::Uint16());
-      case kExprI32LoadMem:
-        return ReduceLoadMem(p, kAstI32, MachineType::Int32());
-
-      case kExprI64LoadMem8S:
-        return ReduceLoadMem(p, kAstI64, MachineType::Int8());
-      case kExprI64LoadMem8U:
-        return ReduceLoadMem(p, kAstI64, MachineType::Uint8());
-      case kExprI64LoadMem16S:
-        return ReduceLoadMem(p, kAstI64, MachineType::Int16());
-      case kExprI64LoadMem16U:
-        return ReduceLoadMem(p, kAstI64, MachineType::Uint16());
-      case kExprI64LoadMem32S:
-        return ReduceLoadMem(p, kAstI64, MachineType::Int32());
-      case kExprI64LoadMem32U:
-        return ReduceLoadMem(p, kAstI64, MachineType::Uint32());
-      case kExprI64LoadMem:
-        return ReduceLoadMem(p, kAstI64, MachineType::Int64());
-
-      case kExprF32LoadMem:
-        return ReduceLoadMem(p, kAstF32, MachineType::Float32());
-
-      case kExprF64LoadMem:
-        return ReduceLoadMem(p, kAstF64, MachineType::Float64());
-
-      case kExprI32StoreMem8:
-        return ReduceStoreMem(p, kAstI32, MachineType::Int8());
-      case kExprI32StoreMem16:
-        return ReduceStoreMem(p, kAstI32, MachineType::Int16());
-      case kExprI32StoreMem:
-        return ReduceStoreMem(p, kAstI32, MachineType::Int32());
-
-      case kExprI64StoreMem8:
-        return ReduceStoreMem(p, kAstI64, MachineType::Int8());
-      case kExprI64StoreMem16:
-        return ReduceStoreMem(p, kAstI64, MachineType::Int16());
-      case kExprI64StoreMem32:
-        return ReduceStoreMem(p, kAstI64, MachineType::Int32());
-      case kExprI64StoreMem:
-        return ReduceStoreMem(p, kAstI64, MachineType::Int64());
-
-      case kExprF32StoreMem:
-        return ReduceStoreMem(p, kAstF32, MachineType::Float32());
-
-      case kExprF64StoreMem:
-        return ReduceStoreMem(p, kAstF64, MachineType::Float64());
-
-      case kExprGrowMemory:
-        TypeCheckLast(p, kAstI32);
-        // TODO(titzer): build node for GrowMemory
-        p->tree->node = BUILD(Int32Constant, 0);
-        return;
-
-      case kExprCallFunction: {
-        FunctionIndexOperand operand(this, p->pc());
-        CHECK(Validate(p->pc(), operand));
-        if (p->index > 0) {
-          TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
-        }
-        if (p->done() && build()) {
-          uint32_t count = p->tree->count + 1;
-          TFNode** buffer = builder_->Buffer(count);
-          buffer[0] = nullptr;  // reserved for code object.
-          for (uint32_t i = 1; i < count; i++) {
-            buffer[i] = p->tree->children[i - 1]->node;
-          }
-          p->tree->node = builder_->CallDirect(operand.index, buffer);
-        }
-        break;
-      }
-      case kExprCallIndirect: {
-        SignatureIndexOperand operand(this, p->pc());
-        CHECK(Validate(p->pc(), operand));
-        if (p->index == 1) {
-          TypeCheckLast(p, kAstI32);
-        } else {
-          TypeCheckLast(p, operand.sig->GetParam(p->index - 2));
-        }
-        if (p->done() && build()) {
-          uint32_t count = p->tree->count;
-          TFNode** buffer = builder_->Buffer(count);
-          for (uint32_t i = 0; i < count; i++) {
-            buffer[i] = p->tree->children[i]->node;
-          }
-          p->tree->node = builder_->CallIndirect(operand.index, buffer);
-        }
-        break;
-      }
-      case kExprCallImport: {
-        ImportIndexOperand operand(this, p->pc());
-        CHECK(Validate(p->pc(), operand));
-        if (p->index > 0) {
-          TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
-        }
-        if (p->done() && build()) {
-          uint32_t count = p->tree->count + 1;
-          TFNode** buffer = builder_->Buffer(count);
-          buffer[0] = nullptr;  // reserved for code object.
-          for (uint32_t i = 1; i < count; i++) {
-            buffer[i] = p->tree->children[i - 1]->node;
-          }
-          p->tree->node = builder_->CallImport(operand.index, buffer);
-        }
-        break;
-      }
-      default:
-        break;
-    }
-  }
-
-  void ReduceBreakToExprBlock(Production* p, Block* block) {
-    ReduceBreakToExprBlock(p, block, p->tree->count > 0 ? p->last() : nullptr);
-  }
-
-  void ReduceBreakToExprBlock(Production* p, Block* block, Tree* val) {
-    if (block->stack_depth < 0) {
+  void BreakTo(Control* block, Value& val) {
+    if (block->is_loop) {
       // This is the inner loop block, which does not have a value.
-      Goto(ssa_env_, block->ssa_env);
+      Goto(ssa_env_, block->end_env);
     } else {
       // Merge the value into the production for the block.
-      Production* bp = &stack_[block->stack_depth];
-      MergeIntoProduction(bp, block->ssa_env, val);
+      MergeInto(block->end_env, &block->node, &block->type, val);
     }
   }
 
-  void MergeIntoProduction(Production* p, SsaEnv* target, Tree* expr) {
+  void MergeInto(SsaEnv* target, TFNode** node, LocalType* type, Value& val) {
     if (!ssa_env_->go()) return;
+    DCHECK_NE(kAstEnd, val.type);
 
     bool first = target->state == SsaEnv::kUnreachable;
     Goto(ssa_env_, target);
-    if (expr == nullptr || expr->type == kAstEnd) return;
 
     if (first) {
       // first merge to this environment; set the type and the node.
-      p->tree->type = expr->type;
-      p->tree->node = expr->node;
-    } else {
+      *type = val.type;
+      *node = val.node;
+    } else if (val.type == *type && val.type != kAstStmt) {
       // merge with the existing value for this block.
-      LocalType type = p->tree->type;
-      if (expr->type != type) {
-        type = kAstStmt;
-        p->tree->type = kAstStmt;
-        p->tree->node = nullptr;
-      } else if (type != kAstStmt) {
-        p->tree->node = CreateOrMergeIntoPhi(type, target->control,
-                                             p->tree->node, expr->node);
-      }
-    }
-  }
-
-  void ReduceLoadMem(Production* p, LocalType type, MachineType mem_type) {
-    DCHECK_EQ(1, p->index);
-    TypeCheckLast(p, kAstI32);  // index
-    if (build()) {
-      MemoryAccessOperand operand(this, p->pc());
-      p->tree->node =
-          builder_->LoadMem(type, mem_type, p->last()->node, operand.offset);
-    }
-  }
-
-  void ReduceStoreMem(Production* p, LocalType type, MachineType mem_type) {
-    if (p->index == 1) {
-      TypeCheckLast(p, kAstI32);  // index
+      *node = CreateOrMergeIntoPhi(*type, target->control, *node, val.node);
     } else {
-      DCHECK_EQ(2, p->index);
-      TypeCheckLast(p, type);
-      if (build()) {
-        MemoryAccessOperand operand(this, p->pc());
-        TFNode* val = p->tree->children[1]->node;
-        builder_->StoreMem(mem_type, p->tree->children[0]->node, operand.offset,
-                           val);
-        p->tree->node = val;
-      }
-    }
-  }
-
-  void TypeCheckLast(Production* p, LocalType expected) {
-    LocalType result = p->last()->type;
-    if (result == expected) return;
-    if (result == kAstEnd) return;
-    if (expected != kAstStmt) {
-      error(p->pc(), p->last()->pc,
-            "%s[%d] expected type %s, found %s of type %s",
-            WasmOpcodes::OpcodeName(p->opcode()), p->index - 1,
-            WasmOpcodes::TypeName(expected),
-            WasmOpcodes::OpcodeName(p->last()->opcode()),
-            WasmOpcodes::TypeName(p->last()->type));
+      // types don't match, or block is already a stmt.
+      *type = kAstStmt;
+      *node = nullptr;
     }
   }
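
MergeInto has three outcomes: the first edge into a merge point records the incoming type and node; a later edge whose type matches folds its node into a phi on the merge control; anything else demotes the block to kAstStmt. A hedged sketch of the phi side of that decision, where Node, IsPhiWithMerge, and CreateOrMergePhi are invented stand-ins for the TurboFan builder API:

#include <vector>

struct Node {
  Node* control = nullptr;  // the merge this phi hangs off, if any
  std::vector<Node*> inputs;
};

bool IsPhiWithMerge(Node* n, Node* merge) {
  return n != nullptr && n->control == merge;
}

// Either extend an existing phi with one more input or, if the two
// predecessors carry different values, make a fresh two-input phi on `merge`.
Node* CreateOrMergePhi(Node* merge, Node* tnode, Node* fnode) {
  if (IsPhiWithMerge(tnode, merge)) {
    tnode->inputs.push_back(fnode);
    return tnode;
  }
  if (tnode == fnode) return tnode;  // same value on both edges: no phi needed
  return new Node{merge, {tnode, fnode}};
}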
 
   void SetEnv(const char* reason, SsaEnv* env) {
 #if DEBUG
-    TRACE("  env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
-          static_cast<int>(blocks_.size()), reason);
-    if (FLAG_trace_wasm_decoder && env && env->control) {
-      TRACE(", control = ");
-      compiler::WasmGraphBuilder::PrintDebugName(env->control);
+    if (FLAG_trace_wasm_decoder) {
+      char state = 'X';
+      if (env) {
+        switch (env->state) {
+          case SsaEnv::kReached:
+            state = 'R';
+            break;
+          case SsaEnv::kUnreachable:
+            state = 'U';
+            break;
+          case SsaEnv::kMerged:
+            state = 'M';
+            break;
+          case SsaEnv::kControlEnd:
+            state = 'E';
+            break;
+        }
+      }
+      PrintF("  env = %p, state = %c, reason = %s", static_cast<void*>(env),
+             state, reason);
+      if (env && env->control) {
+        PrintF(", control = ");
+        compiler::WasmGraphBuilder::PrintDebugName(env->control);
+      }
+      PrintF("\n");
     }
-    TRACE("\n");
 #endif
     ssa_env_ = env;
     if (builder_) {
@@ -1417,7 +1293,7 @@
         builder_->AppendToMerge(merge, from->control);
         // Merge effects.
         if (builder_->IsPhiWithMerge(to->effect, merge)) {
-          builder_->AppendToPhi(merge, to->effect, from->effect);
+          builder_->AppendToPhi(to->effect, from->effect);
         } else if (to->effect != from->effect) {
           uint32_t count = builder_->InputCount(merge);
           TFNode** effects = builder_->Buffer(count);
@@ -1432,7 +1308,7 @@
           TFNode* tnode = to->locals[i];
           TFNode* fnode = from->locals[i];
           if (builder_->IsPhiWithMerge(tnode, merge)) {
-            builder_->AppendToPhi(merge, tnode, fnode);
+            builder_->AppendToPhi(tnode, fnode);
           } else if (tnode != fnode) {
             uint32_t count = builder_->InputCount(merge);
             TFNode** vals = builder_->Buffer(count);
@@ -1455,7 +1331,7 @@
   TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
                                TFNode* fnode) {
     if (builder_->IsPhiWithMerge(tnode, merge)) {
-      builder_->AppendToPhi(merge, tnode, fnode);
+      builder_->AppendToPhi(tnode, fnode);
     } else if (tnode != fnode) {
       uint32_t count = builder_->InputCount(merge);
       TFNode** vals = builder_->Buffer(count);
@@ -1501,8 +1377,6 @@
     size_t size = sizeof(TFNode*) * EnvironmentCount();
     result->control = from->control;
     result->effect = from->effect;
-    result->state = from->state == SsaEnv::kUnreachable ? SsaEnv::kUnreachable
-                                                        : SsaEnv::kReached;
 
     if (from->go()) {
       result->state = SsaEnv::kReached;
@@ -1549,89 +1423,54 @@
   virtual void onFirstError() {
     limit_ = start_;     // Terminate decoding loop.
     builder_ = nullptr;  // Don't build any more nodes.
-#if DEBUG
-    PrintStackForDebugging();
-#endif
+    TRACE(" !%s\n", error_msg_.get());
   }
-
-#if DEBUG
-  void PrintStackForDebugging() { PrintProduction(0); }
-
-  void PrintProduction(size_t depth) {
-    if (depth >= stack_.size()) return;
-    Production* p = &stack_[depth];
-    for (size_t d = 0; d < depth; d++) PrintF("  ");
-
-    PrintF("@%d %s [%d]\n", static_cast<int>(p->tree->pc - start_),
-           WasmOpcodes::OpcodeName(p->opcode()), p->tree->count);
-    for (int i = 0; i < p->index; i++) {
-      Tree* child = p->tree->children[i];
-      for (size_t d = 0; d <= depth; d++) PrintF("  ");
-      PrintF("@%d %s [%d]", static_cast<int>(child->pc - start_),
-             WasmOpcodes::OpcodeName(child->opcode()), child->count);
-      if (child->node) {
-        PrintF(" => TF");
-        compiler::WasmGraphBuilder::PrintDebugName(child->node);
-      }
-      PrintF("\n");
-    }
-    PrintProduction(depth + 1);
-  }
-#endif
-
   BitVector* AnalyzeLoopAssignment(const byte* pc) {
     if (pc >= limit_) return nullptr;
     if (*pc != kExprLoop) return nullptr;
 
     BitVector* assigned =
-        new (zone_) BitVector(static_cast<int>(total_locals_), zone_);
-    // Keep a stack to model the nesting of expressions.
-    std::vector<int> arity_stack;
-    arity_stack.push_back(OpcodeArity(pc));
-    pc += OpcodeLength(pc);
-
+        new (zone_) BitVector(static_cast<int>(local_type_vec_.size()), zone_);
+    int depth = 0;
     // Iteratively process all AST nodes nested inside the loop.
     while (pc < limit_) {
       WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
-      int arity = 0;
       int length = 1;
-      int assigned_index = -1;
-      if (opcode == kExprSetLocal) {
-        LocalIndexOperand operand(this, pc);
-        if (assigned->length() > 0 &&
-            static_cast<int>(operand.index) < assigned->length()) {
-          // Unverified code might have an out-of-bounds index.
-          // Ignore out-of-bounds indices, as the main verification will fail.
-          assigned->Add(operand.index);
-          assigned_index = operand.index;
+      switch (opcode) {
+        case kExprLoop:
+        case kExprIf:
+        case kExprBlock:
+          depth++;
+          DCHECK_EQ(1, OpcodeLength(pc));
+          break;
+        case kExprSetLocal: {
+          LocalIndexOperand operand(this, pc);
+          if (assigned->length() > 0 &&
+              static_cast<int>(operand.index) < assigned->length()) {
+            // Unverified code might have an out-of-bounds index.
+            assigned->Add(operand.index);
+          }
+          length = 1 + operand.length;
+          break;
         }
-        arity = 1;
-        length = 1 + operand.length;
-      } else {
-        arity = OpcodeArity(pc);
-        length = OpcodeLength(pc);
+        case kExprEnd:
+          depth--;
+          break;
+        default:
+          length = OpcodeLength(pc);
+          break;
       }
-
-      TRACE("loop-assign module+%-6d %s func+%d: 0x%02x %s", baserel(pc),
-            indentation(), startrel(pc), opcode,
-            WasmOpcodes::OpcodeName(opcode));
-
-      if (assigned_index >= 0) {
-        TRACE(" (assigned local #%d)\n", assigned_index);
-      } else {
-        TRACE("\n");
-      }
-
+      if (depth <= 0) break;
       pc += length;
-      arity_stack.push_back(arity);
-      while (arity_stack.back() == 0) {
-        arity_stack.pop_back();
-        if (arity_stack.empty()) return assigned;  // reached end of loop
-        arity_stack.back()--;
-      }
     }
     return assigned;
   }
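
Because the post-change format is block-structured with explicit kExprEnd opcodes, the assignment scan no longer needs an arity stack; a single depth counter delimits the loop body. A simplified standalone version of the scan, using a toy one-byte opcode encoding rather than the real one:

#include <set>
#include <vector>

enum Op : unsigned char { BLOCK, LOOP, IF, END, SET_LOCAL, NOP };

// Collect the locals assigned anywhere inside the loop starting at `pc`.
std::set<int> AnalyzeLoopAssignment(const std::vector<unsigned char>& code,
                                    size_t pc) {
  std::set<int> assigned;
  int depth = 0;
  while (pc < code.size()) {
    size_t length = 1;
    switch (code[pc]) {
      case BLOCK: case LOOP: case IF: depth++; break;
      case END: depth--; break;
      case SET_LOCAL:
        // Toy encoding: a one-byte local index follows the opcode.
        if (pc + 1 < code.size()) assigned.insert(code[pc + 1]);
        length = 2;
        break;
      default: break;
    }
    if (depth <= 0) break;  // the matching END of the loop was reached
    pc += length;
  }
  return assigned;
}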
+
+  inline wasm::WasmCodePosition position() {
+    int offset = static_cast<int>(pc_ - start_);
+    DCHECK_EQ(pc_ - start_, offset);  // overflows cannot happen
+    return offset;
+  }
 };
 
 bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
@@ -1647,19 +1486,18 @@
                           FunctionBody& body) {
   Zone zone(allocator);
   SR_WasmDecoder decoder(&zone, nullptr, body);
-  TreeResult result = decoder.Decode();
-  return result;
+  decoder.Decode();
+  return decoder.toResult<Tree*>(nullptr);
 }
 
 TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
                         TFBuilder* builder, FunctionBody& body) {
   Zone zone(allocator);
   SR_WasmDecoder decoder(&zone, builder, body);
-  TreeResult result = decoder.Decode();
-  return result;
+  decoder.Decode();
+  return decoder.toResult<Tree*>(nullptr);
 }
 
-
 std::ostream& operator<<(std::ostream& os, const Tree& tree) {
   if (tree.pc == nullptr) {
     os << "null";
@@ -1675,28 +1513,22 @@
   return os;
 }
 
-
-ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte* pc,
-                                                      const byte* limit,
-                                                      int* length,
-                                                      uint32_t* result) {
-  Decoder decoder(pc, limit);
-  *result = decoder.checked_read_u32v(pc, 0, length);
-  if (decoder.ok()) return kNoError;
-  return (limit - pc) > 1 ? kInvalidLEB128 : kMissingLEB128;
-}
-
 int OpcodeLength(const byte* pc, const byte* end) {
   WasmDecoder decoder(nullptr, nullptr, pc, end);
   return decoder.OpcodeLength(pc);
 }
 
-int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
-                const byte* end) {
-  WasmDecoder decoder(module, sig, pc, end);
+int OpcodeArity(const byte* pc, const byte* end) {
+  WasmDecoder decoder(nullptr, nullptr, pc, end);
   return decoder.OpcodeArity(pc);
 }
 
+void PrintAstForDebugging(const byte* start, const byte* end) {
+  FunctionBody body = {nullptr, nullptr, start, start, end};
+  base::AccountingAllocator allocator;
+  PrintAst(&allocator, body);
+}
+
 void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body) {
   Zone zone(allocator);
   SR_WasmDecoder decoder(&zone, nullptr, body);
@@ -1713,7 +1545,7 @@
   decoder.DecodeLocalDecls(decls);
   const byte* pc = decoder.pc();
   if (body.start != decoder.pc()) {
-    printf("// locals:");
+    os << "// locals: ";
     for (auto p : decls.local_types) {
       LocalType type = p.first;
       uint32_t count = p.second;
@@ -1724,64 +1556,90 @@
     for (const byte* locals = body.start; locals < pc; locals++) {
       printf(" 0x%02x,", *locals);
     }
-    printf("\n");
+    os << std::endl;
   }
 
-  printf("// body: \n");
-  std::vector<int> arity_stack;
+  os << "// body: \n";
+  int control_depth = 0;
   while (pc < body.end) {
-    int arity = decoder.OpcodeArity(pc);
     size_t length = decoder.OpcodeLength(pc);
 
-    for (auto arity : arity_stack) {
-      printf("  ");
-      USE(arity);
-    }
-
     WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+    if (opcode == kExprElse) control_depth--;
+
+    for (int i = 0; i < control_depth && i < 32; i++) printf("  ");
     printf("k%s,", WasmOpcodes::OpcodeName(opcode));
 
     for (size_t i = 1; i < length; i++) {
       printf(" 0x%02x,", pc[i]);
     }
 
-    if (body.module) {
-      switch (opcode) {
-        case kExprCallIndirect: {
-          SignatureIndexOperand operand(&decoder, pc);
-          if (decoder.Validate(pc, operand)) {
-            os << " // sig #" << operand.index << ": " << *operand.sig;
-          }
-          break;
-        }
-        case kExprCallImport: {
-          ImportIndexOperand operand(&decoder, pc);
-          if (decoder.Validate(pc, operand)) {
-            os << " // import #" << operand.index << ": " << *operand.sig;
-          }
-          break;
-        }
-        case kExprCallFunction: {
-          FunctionIndexOperand operand(&decoder, pc);
-          if (decoder.Validate(pc, operand)) {
-            os << " // function #" << operand.index << ": " << *operand.sig;
-          }
-          break;
-        }
-        default:
-          break;
+    switch (opcode) {
+      case kExprIf:
+      case kExprElse:
+      case kExprLoop:
+      case kExprBlock:
+        os << "   // @" << static_cast<int>(pc - body.start);
+        control_depth++;
+        break;
+      case kExprEnd:
+        os << "   // @" << static_cast<int>(pc - body.start);
+        control_depth--;
+        break;
+      case kExprBr: {
+        BreakDepthOperand operand(&decoder, pc);
+        os << "   // arity=" << operand.arity << " depth=" << operand.depth;
+        break;
       }
-    }
+      case kExprBrIf: {
+        BreakDepthOperand operand(&decoder, pc);
+        os << "   // arity=" << operand.arity << " depth=" << operand.depth;
+        break;
+      }
+      case kExprBrTable: {
+        BranchTableOperand operand(&decoder, pc);
+        os << "   // arity=" << operand.arity
+           << " entries=" << operand.table_count;
+        break;
+      }
+      case kExprCallIndirect: {
+        CallIndirectOperand operand(&decoder, pc);
+        if (decoder.Validate(pc, operand)) {
+          os << "   // sig #" << operand.index << ": " << *operand.sig;
+        } else {
+          os << " // arity=" << operand.arity << " sig #" << operand.index;
+        }
+        break;
+      }
+      case kExprCallImport: {
+        CallImportOperand operand(&decoder, pc);
+        if (decoder.Validate(pc, operand)) {
+          os << "   // import #" << operand.index << ": " << *operand.sig;
+        } else {
+          os << " // arity=" << operand.arity << " import #" << operand.index;
+        }
+        break;
+      }
+      case kExprCallFunction: {
+        CallFunctionOperand operand(&decoder, pc);
+        if (decoder.Validate(pc, operand)) {
+          os << "   // function #" << operand.index << ": " << *operand.sig;
+        } else {
+          os << " // arity=" << operand.arity << " function #" << operand.index;
+        }
+        break;
+      }
+      case kExprReturn: {
+        ReturnArityOperand operand(&decoder, pc);
+        os << "   // arity=" << operand.arity;
+        break;
+      }
+      default:
+        break;
+    }
 
     pc += length;
-    printf("\n");
-
-    arity_stack.push_back(arity);
-    while (arity_stack.back() == 0) {
-      arity_stack.pop_back();
-      if (arity_stack.empty()) break;
-      arity_stack.back()--;
-    }
+    os << std::endl;
   }
 }

 
diff --git a/src/wasm/ast-decoder.h b/src/wasm/ast-decoder.h
index 5376e7b..9e96053 100644
--- a/src/wasm/ast-decoder.h
+++ b/src/wasm/ast-decoder.h
@@ -89,66 +89,81 @@
   }
 };
 
-struct Block;
+struct Control;
 struct BreakDepthOperand {
+  uint32_t arity;
   uint32_t depth;
-  Block* target;
+  Control* target;
   int length;
   inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
-    depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
+    int len1 = 0;
+    int len2 = 0;
+    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+    depth = decoder->checked_read_u32v(pc, 1 + len1, &len2, "break depth");
+    length = len1 + len2;
     target = nullptr;
   }
 };
 
-struct BlockCountOperand {
-  uint32_t count;
-  int length;
-  inline BlockCountOperand(Decoder* decoder, const byte* pc) {
-    count = decoder->checked_read_u32v(pc, 1, &length, "block count");
-  }
-};
-
-struct SignatureIndexOperand {
+struct CallIndirectOperand {
+  uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
   int length;
-  inline SignatureIndexOperand(Decoder* decoder, const byte* pc) {
-    index = decoder->checked_read_u32v(pc, 1, &length, "signature index");
+  inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
+    int len1 = 0;
+    int len2 = 0;
+    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+    index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "signature index");
+    length = len1 + len2;
     sig = nullptr;
   }
 };
 
-struct FunctionIndexOperand {
+struct CallFunctionOperand {
+  uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
   int length;
-  inline FunctionIndexOperand(Decoder* decoder, const byte* pc) {
-    index = decoder->checked_read_u32v(pc, 1, &length, "function index");
+  inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
+    int len1 = 0;
+    int len2 = 0;
+    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+    index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
+    length = len1 + len2;
     sig = nullptr;
   }
 };
 
-struct ImportIndexOperand {
+struct CallImportOperand {
+  uint32_t arity;
   uint32_t index;
   FunctionSig* sig;
   int length;
-  inline ImportIndexOperand(Decoder* decoder, const byte* pc) {
-    index = decoder->checked_read_u32v(pc, 1, &length, "import index");
+  inline CallImportOperand(Decoder* decoder, const byte* pc) {
+    int len1 = 0;
+    int len2 = 0;
+    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+    index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "import index");
+    length = len1 + len2;
     sig = nullptr;
   }
 };
 
 struct BranchTableOperand {
+  uint32_t arity;
   uint32_t table_count;
   const byte* table;
   int length;
   inline BranchTableOperand(Decoder* decoder, const byte* pc) {
-    int varint_length;
+    int len1 = 0;
+    int len2 = 0;
+    arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
     table_count =
-        decoder->checked_read_u32v(pc, 1, &varint_length, "expected #entries");
-    length = varint_length + (table_count + 1) * sizeof(uint32_t);
+        decoder->checked_read_u32v(pc, 1 + len1, &len2, "table count");
+    length = len1 + len2 + (table_count + 1) * sizeof(uint32_t);
 
-    uint32_t table_start = 1 + varint_length;
+    uint32_t table_start = 1 + len1 + len2;
     if (decoder->check(pc, table_start, (table_count + 1) * sizeof(uint32_t),
                        "expected <table entries>")) {
       table = pc + table_start;
@@ -177,6 +192,15 @@
   }
 };
 
+struct ReturnArityOperand {
+  uint32_t arity;
+  int length;
+
+  inline ReturnArityOperand(Decoder* decoder, const byte* pc) {
+    arity = decoder->checked_read_u32v(pc, 1, &length, "return count");
+  }
+};
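
Each reshaped operand follows the same pattern: one or two unsigned LEB128 varints read back-to-back, with the operand length being the sum of the varint lengths. For reference, a minimal unchecked u32 LEB128 reader; the real checked_read_u32v additionally bounds-checks against the buffer limit:

#include <cstdint>

// Decode an unsigned LEB128 value of at most 5 bytes; store the number of
// bytes consumed in *length. No bounds checking, unlike checked_read_u32v.
uint32_t read_u32v(const unsigned char* pc, int* length) {
  uint32_t result = 0;
  int shift = 0;
  *length = 0;
  for (int i = 0; i < 5; i++) {
    unsigned char b = pc[i];
    result |= static_cast<uint32_t>(b & 0x7f) << shift;
    shift += 7;
    (*length)++;
    if ((b & 0x80) == 0) break;  // a clear high bit marks the last byte
  }
  return result;
}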
+
 typedef compiler::WasmGraphBuilder TFBuilder;
 struct ModuleEnv;  // forward declaration of module interface.
 
@@ -200,6 +224,9 @@
                         TFBuilder* builder, FunctionBody& body);
 void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body);
 
+// A simplified form of AST printing, e.g. from a debugger.
+void PrintAstForDebugging(const byte* start, const byte* end);
+
 inline TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
                                  ModuleEnv* module, FunctionSig* sig,
                                  const byte* start, const byte* end) {
@@ -215,11 +242,6 @@
   return BuildTFGraph(allocator, builder, body);
 }
 
-enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
-
-ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
-                                                      int*, uint32_t*);
-
 struct AstLocalDecls {
   // The size of the encoded declarations.
   uint32_t decls_encoded_size;  // size of encoded declarations
@@ -243,8 +265,8 @@
 int OpcodeLength(const byte* pc, const byte* end);
 
 // Computes the arity (number of sub-nodes) of the opcode at the given address.
-int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
-                const byte* end);
+int OpcodeArity(const byte* pc, const byte* end);
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/decoder.h b/src/wasm/decoder.h
index f9de2e1..685f5d0 100644
--- a/src/wasm/decoder.h
+++ b/src/wasm/decoder.h
@@ -5,9 +5,11 @@
 #ifndef V8_WASM_DECODER_H_
 #define V8_WASM_DECODER_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/smart-pointers.h"
 #include "src/flags.h"
 #include "src/signature.h"
+#include "src/utils.h"
 #include "src/wasm/wasm-result.h"
 #include "src/zone-containers.h"
 
@@ -47,7 +49,7 @@
   inline bool check(const byte* base, int offset, int length, const char* msg) {
     DCHECK_GE(base, start_);
     if ((base + offset + length) > limit_) {
-      error(base, base + offset, msg);
+      error(base, base + offset, "%s", msg);
       return false;
     }
     return true;
@@ -258,12 +260,13 @@
     }
   }
 
-  void error(const char* msg) { error(pc_, nullptr, msg); }
+  void error(const char* msg) { error(pc_, nullptr, "%s", msg); }
 
-  void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
+  void error(const byte* pc, const char* msg) { error(pc, nullptr, "%s", msg); }
 
   // Sets internal error state.
-  void error(const byte* pc, const byte* pt, const char* format, ...) {
+  void PRINTF_FORMAT(4, 5)
+      error(const byte* pc, const byte* pt, const char* format, ...) {
     if (ok()) {
 #if DEBUG
       if (FLAG_wasm_break_on_decoder_error) {
@@ -392,7 +395,7 @@
         return 0;
       }
       if ((b & 0x80) != 0) {
-        error(base, ptr, msg);
+        error(base, ptr, "%s", msg);
         return 0;
       }
     }
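
The "%s" changes above matter because error() is now annotated with PRINTF_FORMAT, so passing a caller-supplied message directly as the format string would both trip the compile-time check and be unsafe if the message ever contained conversion specifiers. A minimal illustration of the same pattern using the underlying GCC/Clang attribute; report() and the strings here are mine, not V8 code:

#include <cstdarg>
#include <cstdio>

__attribute__((format(printf, 1, 2)))  // lets the compiler check call sites
void report(const char* format, ...) {
  va_list args;
  va_start(args, format);
  vfprintf(stderr, format, args);
  va_end(args);
}

int main() {
  const char* untrusted = "100% done";  // would misparse as a format string
  report("%s\n", untrusted);            // safe: forwarded as plain data
}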
diff --git a/src/wasm/encoder.cc b/src/wasm/encoder.cc
index 92e6b11..39a2f5a 100644
--- a/src/wasm/encoder.cc
+++ b/src/wasm/encoder.cc
@@ -10,6 +10,7 @@
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/encoder.h"
+#include "src/wasm/leb-helper.h"
 #include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
@@ -38,55 +39,34 @@
   *b += 1;
 }
 
-
 void EmitUint16(byte** b, uint16_t x) {
   WriteUnalignedUInt16(*b, x);
   *b += 2;
 }
 
-
 void EmitUint32(byte** b, uint32_t x) {
   WriteUnalignedUInt32(*b, x);
   *b += 4;
 }
 
+void EmitVarInt(byte** b, size_t val) {
+  LEBHelper::write_u32v(b, static_cast<uint32_t>(val));
+}
+
 // Sections all start with a size, but it's unknown at the start.
 // We generate a large varint which we then fixup later when the size is known.
 //
 // TODO(jfb) Not strictly necessary since sizes are calculated ahead of time.
-const size_t padded_varint = 5;
-
-void EmitVarInt(byte** b, size_t val) {
-  while (true) {
-    size_t next = val >> 7;
-    byte out = static_cast<byte>(val & 0x7f);
-    if (next) {
-      *((*b)++) = 0x80 | out;
-      val = next;
-    } else {
-      *((*b)++) = out;
-      break;
-    }
-  }
-}
-
-size_t SizeOfVarInt(size_t value) {
-  size_t size = 0;
-  do {
-    size++;
-    value = value >> 7;
-  } while (value > 0);
-  return size;
-}
+const size_t kPaddedVarintSize = 5;
 
 void FixupSection(byte* start, byte* end) {
-  // Same as EmitVarInt, but fixed-width with zeroes in the MSBs.
-  size_t val = end - start - padded_varint;
+  // Same as LEBHelper::write_u32v, but fixed-width with zeroes in the MSBs.
+  size_t val = end - start - kPaddedVarintSize;
   TRACE("  fixup %u\n", (unsigned)val);
-  for (size_t pos = 0; pos != padded_varint; ++pos) {
+  for (size_t pos = 0; pos != kPaddedVarintSize; ++pos) {
     size_t next = val >> 7;
     byte out = static_cast<byte>(val & 0x7f);
-    if (pos != padded_varint - 1) {
+    if (pos != kPaddedVarintSize - 1) {
       *(start++) = 0x80 | out;
       val = next;
     } else {
@@ -98,77 +78,63 @@
 
 // Returns the start of the section, where the section VarInt size is.
 byte* EmitSection(WasmSection::Code code, byte** b) {
-  byte* start = *b;
+  // Emit the section name.
   const char* name = WasmSection::getName(code);
-  size_t length = WasmSection::getNameLength(code);
   TRACE("emit section: %s\n", name);
-  for (size_t padding = 0; padding != padded_varint; ++padding) {
-    EmitUint8(b, 0xff);  // Will get fixed up later.
-  }
+  size_t length = WasmSection::getNameLength(code);
   EmitVarInt(b, length);  // Section name string size.
   for (size_t i = 0; i != length; ++i) EmitUint8(b, name[i]);
+
+  // Emit a placeholder for the length.
+  byte* start = *b;
+  for (size_t padding = 0; padding != kPaddedVarintSize; ++padding) {
+    EmitUint8(b, 0xff);  // Will get fixed up later.
+  }
+
   return start;
 }
 }  // namespace
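
The placeholder-and-fixup scheme relies on a quirk of LEB128: a value may be encoded in more bytes than strictly necessary by keeping continuation bits set with zero payload bits. A sketch of the fixed five-byte encoding FixupSection writes back (write_padded_u32v is my name for it, not a V8 helper):

#include <cstddef>
#include <cstdint>

// Write `val` as exactly five LEB128 bytes: the first four keep the
// continuation bit set even when their payload is zero, so any u32 fits.
void write_padded_u32v(unsigned char* out, uint32_t val) {
  for (size_t pos = 0; pos != 5; ++pos) {
    unsigned char payload = static_cast<unsigned char>(val & 0x7f);
    val >>= 7;
    out[pos] = (pos != 4) ? (0x80 | payload) : payload;
  }
}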
 
-struct WasmFunctionBuilder::Type {
-  bool param_;
-  LocalType type_;
-};
-
-
 WasmFunctionBuilder::WasmFunctionBuilder(Zone* zone)
-    : return_type_(kAstI32),
-      locals_(zone),
-      exported_(0),
-      external_(0),
-      body_(zone),
-      local_indices_(zone),
-      name_(zone) {}
+    : locals_(zone), exported_(0), body_(zone), name_(zone) {}
 
-
-uint16_t WasmFunctionBuilder::AddParam(LocalType type) {
-  return AddVar(type, true);
+void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
+  byte buffer[8];
+  byte* ptr = buffer;
+  LEBHelper::write_u32v(&ptr, val);
+  for (byte* p = buffer; p < ptr; p++) {
+    body_.push_back(*p);
+  }
 }
 
-
-uint16_t WasmFunctionBuilder::AddLocal(LocalType type) {
-  return AddVar(type, false);
+void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
+  DCHECK(!locals_.has_sig());
+  locals_.set_sig(sig);
 }
 
-
-uint16_t WasmFunctionBuilder::AddVar(LocalType type, bool param) {
-  locals_.push_back({param, type});
-  return static_cast<uint16_t>(locals_.size() - 1);
+uint32_t WasmFunctionBuilder::AddLocal(LocalType type) {
+  DCHECK(locals_.has_sig());
+  return locals_.AddLocals(1, type);
 }
 
+void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) {
+  EmitWithVarInt(kExprGetLocal, local_index);
+}
 
-void WasmFunctionBuilder::ReturnType(LocalType type) { return_type_ = type; }
-
+void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
+  EmitWithVarInt(kExprSetLocal, local_index);
+}
 
 void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
-  EmitCode(code, code_size, nullptr, 0);
-}
-
-
-void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size,
-                                   const uint32_t* local_indices,
-                                   uint32_t indices_size) {
-  size_t size = body_.size();
   for (size_t i = 0; i < code_size; i++) {
     body_.push_back(code[i]);
   }
-  for (size_t i = 0; i < indices_size; i++) {
-    local_indices_.push_back(local_indices[i] + static_cast<uint32_t>(size));
-  }
 }
 
-
 void WasmFunctionBuilder::Emit(WasmOpcode opcode) {
   body_.push_back(static_cast<byte>(opcode));
 }
 
-
 void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
   body_.push_back(static_cast<byte>(opcode));
   body_.push_back(immediate);
@@ -184,47 +150,22 @@
 void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode,
                                          uint32_t immediate) {
   body_.push_back(static_cast<byte>(opcode));
-  size_t immediate_size = SizeOfVarInt(immediate);
-  body_.insert(body_.end(), immediate_size, 0);
-  byte* p = &body_[body_.size() - immediate_size];
-  EmitVarInt(&p, immediate);
+  EmitVarInt(immediate);
 }
 
-uint32_t WasmFunctionBuilder::EmitEditableVarIntImmediate() {
-  // Guess that the immediate will be 1 byte. If it is more, we'll have to
-  // shift everything down.
-  body_.push_back(0);
-  return static_cast<uint32_t>(body_.size()) - 1;
-}
-
-void WasmFunctionBuilder::EditVarIntImmediate(uint32_t offset,
-                                              const uint32_t immediate) {
-  uint32_t immediate_size = static_cast<uint32_t>(SizeOfVarInt(immediate));
-  // In EmitEditableVarIntImmediate, we guessed that we'd only need one byte.
-  // If we need more, shift everything down to make room for the larger
-  // immediate.
-  if (immediate_size > 1) {
-    uint32_t diff = immediate_size - 1;
-    body_.insert(body_.begin() + offset, diff, 0);
-
-    for (size_t i = 0; i < local_indices_.size(); ++i) {
-      if (local_indices_[i] >= offset) {
-        local_indices_[i] += diff;
-      }
-    }
+void WasmFunctionBuilder::EmitI32Const(int32_t value) {
+  // TODO(titzer): variable-length signed and unsigned i32 constants.
+  if (-128 <= value && value <= 127) {
+    EmitWithU8(kExprI8Const, static_cast<byte>(value));
+  } else {
+    byte code[] = {WASM_I32V_5(value)};
+    EmitCode(code, sizeof(code));
   }
-  DCHECK(offset + immediate_size <= body_.size());
-  byte* p = &body_[offset];
-  EmitVarInt(&p, immediate);
 }
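
EmitI32Const uses the one-byte-immediate kExprI8Const form when the value fits in a signed byte and a fixed five-byte varint otherwise; the TODO asks for length-minimal signed varints. For comparison, a minimal signed LEB128 encoder that such a change might use (write_i32v is a hypothetical helper, not part of this patch):

#include <cstdint>
#include <vector>

// Minimal-length signed LEB128 for an int32, the encoding the TODO asks for.
void write_i32v(std::vector<unsigned char>* out, int32_t value) {
  while (true) {
    unsigned char b = value & 0x7f;
    value >>= 7;  // arithmetic shift preserves the sign
    // Done when the remaining bits are all sign bits and this byte's
    // bit 6 already agrees with the sign.
    bool done = (value == 0 && (b & 0x40) == 0) ||
                (value == -1 && (b & 0x40) != 0);
    if (!done) b |= 0x80;
    out->push_back(b);
    if (done) break;
  }
}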
 
-
 void WasmFunctionBuilder::Exported(uint8_t flag) { exported_ = flag; }
 
-
-void WasmFunctionBuilder::External(uint8_t flag) { external_ = flag; }
-
-void WasmFunctionBuilder::SetName(const unsigned char* name, int name_length) {
+void WasmFunctionBuilder::SetName(const char* name, int name_length) {
   name_.clear();
   if (name_length > 0) {
     for (int i = 0; i < name_length; i++) {
@@ -233,139 +174,43 @@
   }
 }
 
-
 WasmFunctionEncoder* WasmFunctionBuilder::Build(Zone* zone,
                                                 WasmModuleBuilder* mb) const {
   WasmFunctionEncoder* e =
-      new (zone) WasmFunctionEncoder(zone, return_type_, exported_, external_);
-  uint16_t* var_index = zone->NewArray<uint16_t>(locals_.size());
-  IndexVars(e, var_index);
-  if (body_.size() > 0) {
-    // TODO(titzer): iterate over local indexes, not the bytes.
-    const byte* start = &body_[0];
-    const byte* end = start + body_.size();
-    size_t local_index = 0;
-    for (size_t i = 0; i < body_.size();) {
-      if (local_index < local_indices_.size() &&
-          i == local_indices_[local_index]) {
-        int length = 0;
-        uint32_t index;
-        ReadUnsignedLEB128Operand(start + i, end, &length, &index);
-        uint16_t new_index = var_index[index];
-        const std::vector<uint8_t>& index_vec = UnsignedLEB128From(new_index);
-        for (size_t j = 0; j < index_vec.size(); j++) {
-          e->body_.push_back(index_vec.at(j));
-        }
-        i += length;
-        local_index++;
-      } else {
-        e->body_.push_back(*(start + i));
-        i++;
-      }
-    }
-  }
-  FunctionSig::Builder sig(zone, return_type_ == kAstStmt ? 0 : 1,
-                           e->params_.size());
-  if (return_type_ != kAstStmt) {
-    sig.AddReturn(static_cast<LocalType>(return_type_));
-  }
-  for (size_t i = 0; i < e->params_.size(); i++) {
-    sig.AddParam(static_cast<LocalType>(e->params_[i]));
-  }
-  e->signature_index_ = mb->AddSignature(sig.Build());
+      new (zone) WasmFunctionEncoder(zone, locals_, exported_);
+  // TODO(titzer): lame memcpy here.
+  e->body_.insert(e->body_.begin(), body_.begin(), body_.end());
+  e->signature_index_ = mb->AddSignature(locals_.get_sig());
   e->name_.insert(e->name_.begin(), name_.begin(), name_.end());
   return e;
 }
 
-
-void WasmFunctionBuilder::IndexVars(WasmFunctionEncoder* e,
-                                    uint16_t* var_index) const {
-  uint16_t param = 0;
-  uint16_t i32 = 0;
-  uint16_t i64 = 0;
-  uint16_t f32 = 0;
-  uint16_t f64 = 0;
-  for (size_t i = 0; i < locals_.size(); i++) {
-    if (locals_.at(i).param_) {
-      param++;
-    } else if (locals_.at(i).type_ == kAstI32) {
-      i32++;
-    } else if (locals_.at(i).type_ == kAstI64) {
-      i64++;
-    } else if (locals_.at(i).type_ == kAstF32) {
-      f32++;
-    } else if (locals_.at(i).type_ == kAstF64) {
-      f64++;
-    }
-  }
-  e->local_i32_count_ = i32;
-  e->local_i64_count_ = i64;
-  e->local_f32_count_ = f32;
-  e->local_f64_count_ = f64;
-  f64 = param + i32 + i64 + f32;
-  f32 = param + i32 + i64;
-  i64 = param + i32;
-  i32 = param;
-  param = 0;
-  for (size_t i = 0; i < locals_.size(); i++) {
-    if (locals_.at(i).param_) {
-      e->params_.push_back(locals_.at(i).type_);
-      var_index[i] = param++;
-    } else if (locals_.at(i).type_ == kAstI32) {
-      var_index[i] = i32++;
-    } else if (locals_.at(i).type_ == kAstI64) {
-      var_index[i] = i64++;
-    } else if (locals_.at(i).type_ == kAstF32) {
-      var_index[i] = f32++;
-    } else if (locals_.at(i).type_ == kAstF64) {
-      var_index[i] = f64++;
-    }
-  }
-}
-
-
-WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalType return_type,
-                                         bool exported, bool external)
-    : params_(zone),
-      exported_(exported),
-      external_(external),
-      body_(zone),
-      name_(zone) {}
-
+WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalDeclEncoder locals,
+                                         bool exported)
+    : locals_(locals), exported_(exported), body_(zone), name_(zone) {}
 
 uint32_t WasmFunctionEncoder::HeaderSize() const {
   uint32_t size = 3;
-  if (!external_) size += 2;
+  size += 2;
   if (HasName()) {
     uint32_t name_size = NameSize();
-    size += static_cast<uint32_t>(SizeOfVarInt(name_size)) + name_size;
+    size +=
+        static_cast<uint32_t>(LEBHelper::sizeof_u32v(name_size)) + name_size;
   }
   return size;
 }
 
-
 uint32_t WasmFunctionEncoder::BodySize(void) const {
-  // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
-  LocalDeclEncoder local_decl;
-  local_decl.AddLocals(local_i32_count_, kAstI32);
-  local_decl.AddLocals(local_i64_count_, kAstI64);
-  local_decl.AddLocals(local_f32_count_, kAstF32);
-  local_decl.AddLocals(local_f64_count_, kAstF64);
-
-  return external_ ? 0
-                   : static_cast<uint32_t>(body_.size() + local_decl.Size());
+  return static_cast<uint32_t>(body_.size() + locals_.Size());
 }
 
-
 uint32_t WasmFunctionEncoder::NameSize() const {
   return HasName() ? static_cast<uint32_t>(name_.size()) : 0;
 }
 
-
 void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
                                     byte** body) const {
   uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
-                      (external_ ? kDeclFunctionImport : 0) |
                       (HasName() ? kDeclFunctionName : 0);
 
   EmitUint8(header, decl_bits);
@@ -378,25 +223,14 @@
     }
   }
 
-
-  if (!external_) {
-    // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
-    LocalDeclEncoder local_decl;
-    local_decl.AddLocals(local_i32_count_, kAstI32);
-    local_decl.AddLocals(local_i64_count_, kAstI64);
-    local_decl.AddLocals(local_f32_count_, kAstF32);
-    local_decl.AddLocals(local_f64_count_, kAstF64);
-
-    EmitUint16(header, static_cast<uint16_t>(body_.size() + local_decl.Size()));
-    (*header) += local_decl.Emit(*header);
-    if (body_.size() > 0) {
-      std::memcpy(*header, &body_[0], body_.size());
-      (*header) += body_.size();
-    }
+  EmitUint16(header, static_cast<uint16_t>(body_.size() + locals_.Size()));
+  (*header) += locals_.Emit(*header);
+  if (body_.size() > 0) {
+    std::memcpy(*header, &body_[0], body_.size());
+    (*header) += body_.size();
   }
 }
 
-
 WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
                                                uint32_t size, uint32_t dest)
     : data_(zone), dest_(dest) {
@@ -405,18 +239,15 @@
   }
 }
 
-
 uint32_t WasmDataSegmentEncoder::HeaderSize() const {
   static const int kDataSegmentSize = 13;
   return kDataSegmentSize;
 }
 
-
 uint32_t WasmDataSegmentEncoder::BodySize() const {
   return static_cast<uint32_t>(data_.size());
 }
 
-
 void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
                                        byte** body) const {
   EmitVarInt(header, dest_);
@@ -429,6 +260,7 @@
 WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
     : zone_(zone),
       signatures_(zone),
+      imports_(zone),
       functions_(zone),
       data_segments_(zone),
       indirect_functions_(zone),
@@ -436,12 +268,11 @@
       signature_map_(zone),
       start_function_index_(-1) {}
 
-uint16_t WasmModuleBuilder::AddFunction() {
+uint32_t WasmModuleBuilder::AddFunction() {
   functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
-  return static_cast<uint16_t>(functions_.size() - 1);
+  return static_cast<uint32_t>(functions_.size() - 1);
 }
 
-
 WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
   if (functions_.size() > index) {
     return functions_.at(index);
@@ -450,12 +281,10 @@
   }
 }
 
-
 void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
   data_segments_.push_back(data);
 }
 
-
 bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
                                                         FunctionSig* b) const {
   if (a->return_count() < b->return_count()) return true;
@@ -473,30 +302,37 @@
   return false;
 }
 
-
-uint16_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
+uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
   SignatureMap::iterator pos = signature_map_.find(sig);
   if (pos != signature_map_.end()) {
     return pos->second;
   } else {
-    uint16_t index = static_cast<uint16_t>(signatures_.size());
+    uint32_t index = static_cast<uint32_t>(signatures_.size());
     signature_map_[sig] = index;
     signatures_.push_back(sig);
     return index;
   }
 }
 
-
-void WasmModuleBuilder::AddIndirectFunction(uint16_t index) {
+void WasmModuleBuilder::AddIndirectFunction(uint32_t index) {
   indirect_functions_.push_back(index);
 }
 
-void WasmModuleBuilder::MarkStartFunction(uint16_t index) {
+uint32_t WasmModuleBuilder::AddImport(const char* name, int name_length,
+                                      FunctionSig* sig) {
+  imports_.push_back({AddSignature(sig), name, name_length});
+  return static_cast<uint32_t>(imports_.size() - 1);
+}
+
+void WasmModuleBuilder::MarkStartFunction(uint32_t index) {
   start_function_index_ = index;
 }
 
 WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
   WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
+  for (auto import : imports_) {
+    writer->imports_.push_back(import);
+  }
   for (auto function : functions_) {
     writer->functions_.push_back(function->Build(zone, this));
   }
@@ -516,15 +352,14 @@
   return writer;
 }
 
-
 uint32_t WasmModuleBuilder::AddGlobal(MachineType type, bool exported) {
   globals_.push_back(std::make_pair(type, exported));
   return static_cast<uint32_t>(globals_.size() - 1);
 }
 
-
 WasmModuleWriter::WasmModuleWriter(Zone* zone)
-    : functions_(zone),
+    : imports_(zone),
+      functions_(zone),
       data_segments_(zone),
       signatures_(zone),
       indirect_functions_(zone),
@@ -542,10 +377,11 @@
   }
 
   void AddSection(WasmSection::Code code, size_t other_size) {
-    Add(padded_varint + SizeOfVarInt(WasmSection::getNameLength(code)) +
+    Add(kPaddedVarintSize +
+            LEBHelper::sizeof_u32v(WasmSection::getNameLength(code)) +
             WasmSection::getNameLength(code),
         0);
-    if (other_size) Add(SizeOfVarInt(other_size), 0);
+    if (other_size) Add(LEBHelper::sizeof_u32v(other_size), 0);
   }
 };
 
@@ -554,11 +390,6 @@
 
   sizes.Add(2 * sizeof(uint32_t), 0);  // header
 
-  sizes.AddSection(WasmSection::Code::Memory, 0);
-  sizes.Add(kDeclMemorySize, 0);
-  TRACE("Size after memory: %u, %u\n", (unsigned)sizes.header_size,
-        (unsigned)sizes.body_size);
-
   if (globals_.size() > 0) {
     sizes.AddSection(WasmSection::Code::Globals, globals_.size());
     /* These globals never have names, so are always 3 bytes. */
@@ -570,15 +401,18 @@
   if (signatures_.size() > 0) {
     sizes.AddSection(WasmSection::Code::Signatures, signatures_.size());
     for (auto sig : signatures_) {
-      sizes.Add(
-          1 + SizeOfVarInt(sig->parameter_count()) + sig->parameter_count(), 0);
+      sizes.Add(1 + LEBHelper::sizeof_u32v(sig->parameter_count()) +
+                    sig->parameter_count() +
+                    LEBHelper::sizeof_u32v(sig->return_count()) +
+                    sig->return_count(),
+                0);
     }
     TRACE("Size after signatures: %u, %u\n", (unsigned)sizes.header_size,
           (unsigned)sizes.body_size);
   }
 
   if (functions_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::Functions, functions_.size());
+    sizes.AddSection(WasmSection::Code::OldFunctions, functions_.size());
     for (auto function : functions_) {
       sizes.Add(function->HeaderSize() + function->BodySize(),
                 function->NameSize());
@@ -587,9 +421,36 @@
           (unsigned)sizes.body_size);
   }
 
+  if (imports_.size() > 0) {
+    sizes.AddSection(WasmSection::Code::ImportTable, imports_.size());
+    for (auto import : imports_) {
+      sizes.Add(LEBHelper::sizeof_u32v(import.sig_index), 0);
+      sizes.Add(LEBHelper::sizeof_u32v(import.name_length), 0);
+      sizes.Add(import.name_length, 0);
+      sizes.Add(1, 0);
+    }
+    TRACE("Size after imports: %u, %u\n", (unsigned)sizes.header_size,
+          (unsigned)sizes.body_size);
+  }
+
+  if (indirect_functions_.size() > 0) {
+    sizes.AddSection(WasmSection::Code::FunctionTable,
+                     indirect_functions_.size());
+    for (auto function_index : indirect_functions_) {
+      sizes.Add(LEBHelper::sizeof_u32v(function_index), 0);
+    }
+    TRACE("Size after indirect functions: %u, %u\n",
+          (unsigned)sizes.header_size, (unsigned)sizes.body_size);
+  }
+
+  sizes.AddSection(WasmSection::Code::Memory, 0);
+  sizes.Add(kDeclMemorySize, 0);
+  TRACE("Size after memory: %u, %u\n", (unsigned)sizes.header_size,
+        (unsigned)sizes.body_size);
+
   if (start_function_index_ >= 0) {
     sizes.AddSection(WasmSection::Code::StartFunction, 0);
-    sizes.Add(SizeOfVarInt(start_function_index_), 0);
+    sizes.Add(LEBHelper::sizeof_u32v(start_function_index_), 0);
     TRACE("Size after start: %u, %u\n", (unsigned)sizes.header_size,
           (unsigned)sizes.body_size);
   }
@@ -603,16 +464,6 @@
           (unsigned)sizes.body_size);
   }
 
-  if (indirect_functions_.size() > 0) {
-    sizes.AddSection(WasmSection::Code::FunctionTable,
-                     indirect_functions_.size());
-    for (auto function_index : indirect_functions_) {
-      sizes.Add(SizeOfVarInt(function_index), 0);
-    }
-    TRACE("Size after indirect functions: %u, %u\n",
-          (unsigned)sizes.header_size, (unsigned)sizes.body_size);
-  }
-
   if (sizes.body_size > 0) {
     sizes.AddSection(WasmSection::Code::End, 0);
     TRACE("Size after end: %u, %u\n", (unsigned)sizes.header_size,
@@ -629,16 +480,6 @@
   EmitUint32(&header, kWasmMagic);
   EmitUint32(&header, kWasmVersion);
 
-  // -- emit memory declaration ------------------------------------------------
-  {
-    byte* section = EmitSection(WasmSection::Code::Memory, &header);
-    EmitVarInt(&header, 16);  // min memory size
-    EmitVarInt(&header, 16);  // max memory size
-    EmitUint8(&header, 0);    // memory export
-    static_assert(kDeclMemorySize == 3, "memory size must match emit above");
-    FixupSection(section, header);
-  }
-
   // -- emit globals -----------------------------------------------------------
   if (globals_.size() > 0) {
     byte* section = EmitSection(WasmSection::Code::Globals, &header);
@@ -658,22 +499,36 @@
     EmitVarInt(&header, signatures_.size());
 
     for (FunctionSig* sig : signatures_) {
+      EmitUint8(&header, kWasmFunctionTypeForm);
       EmitVarInt(&header, sig->parameter_count());
-      if (sig->return_count() > 0) {
-        EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn()));
-      } else {
-        EmitUint8(&header, kLocalVoid);
-      }
       for (size_t j = 0; j < sig->parameter_count(); j++) {
         EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
       }
+      EmitVarInt(&header, sig->return_count());
+      for (size_t j = 0; j < sig->return_count(); j++) {
+        EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn(j)));
+      }
+    }
+    FixupSection(section, header);
+  }
+
+  // -- emit imports -----------------------------------------------------------
+  if (imports_.size() > 0) {
+    byte* section = EmitSection(WasmSection::Code::ImportTable, &header);
+    EmitVarInt(&header, imports_.size());
+    for (auto import : imports_) {
+      EmitVarInt(&header, import.sig_index);
+      EmitVarInt(&header, import.name_length);
+      std::memcpy(header, import.name, import.name_length);
+      header += import.name_length;
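+      // The import's name serves as the module name; a zero-length
+      // function name is emitted after it.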
+      EmitVarInt(&header, 0);
     }
     FixupSection(section, header);
   }
 
   // -- emit functions ---------------------------------------------------------
   if (functions_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::Functions, &header);
+    byte* section = EmitSection(WasmSection::Code::OldFunctions, &header);
     EmitVarInt(&header, functions_.size());
 
     for (auto func : functions_) {
@@ -682,6 +537,27 @@
     FixupSection(section, header);
   }
 
+  // -- emit function table ----------------------------------------------------
+  if (indirect_functions_.size() > 0) {
+    byte* section = EmitSection(WasmSection::Code::FunctionTable, &header);
+    EmitVarInt(&header, indirect_functions_.size());
+
+    for (auto index : indirect_functions_) {
+      EmitVarInt(&header, index);
+    }
+    FixupSection(section, header);
+  }
+
+  // -- emit memory declaration ------------------------------------------------
+  {
+    byte* section = EmitSection(WasmSection::Code::Memory, &header);
+    EmitVarInt(&header, 16);  // min memory size
+    EmitVarInt(&header, 16);  // max memory size
+    EmitUint8(&header, 0);    // memory export
+    static_assert(kDeclMemorySize == 3, "memory size must match emit above");
+    FixupSection(section, header);
+  }
+
   // -- emit start function index ----------------------------------------------
   if (start_function_index_ >= 0) {
     byte* section = EmitSection(WasmSection::Code::StartFunction, &header);
@@ -700,17 +576,6 @@
     FixupSection(section, header);
   }
 
-  // -- emit function table ----------------------------------------------------
-  if (indirect_functions_.size() > 0) {
-    byte* section = EmitSection(WasmSection::Code::FunctionTable, &header);
-    EmitVarInt(&header, indirect_functions_.size());
-
-    for (auto index : indirect_functions_) {
-      EmitVarInt(&header, index);
-    }
-    FixupSection(section, header);
-  }
-
   if (sizes.body_size > 0) {
     byte* section = EmitSection(WasmSection::Code::End, &header);
     FixupSection(section, header);
@@ -718,22 +583,6 @@
 
   return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
 }
-
-
-std::vector<uint8_t> UnsignedLEB128From(uint32_t result) {
-  std::vector<uint8_t> output;
-  uint8_t next = 0;
-  int shift = 0;
-  do {
-    next = static_cast<uint8_t>(result >> shift);
-    if (((result >> shift) & 0xFFFFFF80) != 0) {
-      next = next | 0x80;
-    }
-    output.push_back(next);
-    shift += 7;
-  } while ((next & 0x80) != 0);
-  return output;
-}
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/encoder.h b/src/wasm/encoder.h
index 49a7bf7..0f2118d 100644
--- a/src/wasm/encoder.h
+++ b/src/wasm/encoder.h
@@ -10,6 +10,7 @@
 
 #include "src/base/smart-pointers.h"
 
+#include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
 #include "src/wasm/wasm-result.h"
@@ -28,55 +29,42 @@
   void Serialize(byte* buffer, byte** header, byte** body) const;
 
  private:
-  WasmFunctionEncoder(Zone* zone, LocalType return_type, bool exported,
-                      bool external);
+  WasmFunctionEncoder(Zone* zone, LocalDeclEncoder locals, bool exported);
   friend class WasmFunctionBuilder;
-  uint16_t signature_index_;
-  ZoneVector<LocalType> params_;
-  uint16_t local_i32_count_;
-  uint16_t local_i64_count_;
-  uint16_t local_f32_count_;
-  uint16_t local_f64_count_;
+  uint32_t signature_index_;
+  LocalDeclEncoder locals_;
   bool exported_;
-  bool external_;
   ZoneVector<uint8_t> body_;
   ZoneVector<char> name_;
 
-  bool HasName() const { return (exported_ || external_) && name_.size() > 0; }
+  bool HasName() const { return exported_ && name_.size() > 0; }
 };
 
 class WasmFunctionBuilder : public ZoneObject {
  public:
-  uint16_t AddParam(LocalType type);
-  uint16_t AddLocal(LocalType type);
-  void ReturnType(LocalType type);
+  void SetSignature(FunctionSig* sig);
+  uint32_t AddLocal(LocalType type);
+  void EmitVarInt(uint32_t val);
   void EmitCode(const byte* code, uint32_t code_size);
-  void EmitCode(const byte* code, uint32_t code_size,
-                const uint32_t* local_indices, uint32_t indices_size);
   void Emit(WasmOpcode opcode);
+  void EmitGetLocal(uint32_t index);
+  void EmitSetLocal(uint32_t index);
+  void EmitI32Const(int32_t val);
   void EmitWithU8(WasmOpcode opcode, const byte immediate);
   void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
   void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
-  uint32_t EmitEditableVarIntImmediate();
-  void EditVarIntImmediate(uint32_t offset, const uint32_t immediate);
   void Exported(uint8_t flag);
-  void External(uint8_t flag);
-  void SetName(const unsigned char* name, int name_length);
+  void SetName(const char* name, int name_length);
   WasmFunctionEncoder* Build(Zone* zone, WasmModuleBuilder* mb) const;
 
  private:
   explicit WasmFunctionBuilder(Zone* zone);
   friend class WasmModuleBuilder;
-  LocalType return_type_;
-  struct Type;
-  ZoneVector<Type> locals_;
+  LocalDeclEncoder locals_;
   uint8_t exported_;
-  uint8_t external_;
   ZoneVector<uint8_t> body_;
-  ZoneVector<uint32_t> local_indices_;
   ZoneVector<char> name_;
-  uint16_t AddVar(LocalType type, bool param);
-  void IndexVars(WasmFunctionEncoder* e, uint16_t* var_index) const;
+  void IndexVars(WasmFunctionEncoder* e, uint32_t* var_index) const;
 };
 
 class WasmDataSegmentEncoder : public ZoneObject {
@@ -105,6 +93,12 @@
   const byte* end_;
 };
 
+struct WasmFunctionImport {
+  uint32_t sig_index;
+  const char* name;
+  int name_length;
+};
+
 class WasmModuleWriter : public ZoneObject {
  public:
   WasmModuleIndex* WriteTo(Zone* zone) const;
@@ -112,10 +106,11 @@
  private:
   friend class WasmModuleBuilder;
   explicit WasmModuleWriter(Zone* zone);
+  ZoneVector<WasmFunctionImport> imports_;
   ZoneVector<WasmFunctionEncoder*> functions_;
   ZoneVector<WasmDataSegmentEncoder*> data_segments_;
   ZoneVector<FunctionSig*> signatures_;
-  ZoneVector<uint16_t> indirect_functions_;
+  ZoneVector<uint32_t> indirect_functions_;
   ZoneVector<std::pair<MachineType, bool>> globals_;
   int start_function_index_;
 };
@@ -123,32 +118,33 @@
 class WasmModuleBuilder : public ZoneObject {
  public:
   explicit WasmModuleBuilder(Zone* zone);
-  uint16_t AddFunction();
+  uint32_t AddFunction();
   uint32_t AddGlobal(MachineType type, bool exported);
   WasmFunctionBuilder* FunctionAt(size_t index);
   void AddDataSegment(WasmDataSegmentEncoder* data);
-  uint16_t AddSignature(FunctionSig* sig);
-  void AddIndirectFunction(uint16_t index);
-  void MarkStartFunction(uint16_t index);
+  uint32_t AddSignature(FunctionSig* sig);
+  void AddIndirectFunction(uint32_t index);
+  void MarkStartFunction(uint32_t index);
+  uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
   WasmModuleWriter* Build(Zone* zone);
 
   struct CompareFunctionSigs {
     bool operator()(FunctionSig* a, FunctionSig* b) const;
   };
-  typedef ZoneMap<FunctionSig*, uint16_t, CompareFunctionSigs> SignatureMap;
+  typedef ZoneMap<FunctionSig*, uint32_t, CompareFunctionSigs> SignatureMap;
 
  private:
   Zone* zone_;
   ZoneVector<FunctionSig*> signatures_;
+  ZoneVector<WasmFunctionImport> imports_;
   ZoneVector<WasmFunctionBuilder*> functions_;
   ZoneVector<WasmDataSegmentEncoder*> data_segments_;
-  ZoneVector<uint16_t> indirect_functions_;
+  ZoneVector<uint32_t> indirect_functions_;
   ZoneVector<std::pair<MachineType, bool>> globals_;
   SignatureMap signature_map_;
   int start_function_index_;
 };
 
-std::vector<uint8_t> UnsignedLEB128From(uint32_t result);
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/leb-helper.h b/src/wasm/leb-helper.h
new file mode 100644
index 0000000..7ba244d
--- /dev/null
+++ b/src/wasm/leb-helper.h
@@ -0,0 +1,131 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_LEB_HELPER_H_
+#define V8_WASM_LEB_HELPER_H_
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class LEBHelper {
+ public:
+  // Write a 32-bit unsigned LEB to {dest}, updating {dest} to point after
+  // the last uint8_t written. No safety checks.
+  static void write_u32v(uint8_t** dest, uint32_t val) {
+    while (val >= 0x80) {
+      *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+      val >>= 7;
+    }
+    *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+  }
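+  // Example: 624485 (0x98765) splits into 7-bit groups from the least
+  // significant end (0x65, 0x0E, 0x26); all but the last group get the
+  // continuation bit, so write_u32v emits the bytes 0xE5 0x8E 0x26.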
+
+  // Write a 32-bit signed LEB to {dest}, updating {dest} to point after
+  // the last uint8_t written. No safety checks.
+  static void write_i32v(uint8_t** dest, int32_t val) {
+    if (val >= 0) {
+      while (val >= 0x40) {  // prevent sign extension.
+        *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+        val >>= 7;
+      }
+      *((*dest)++) = static_cast<uint8_t>(val & 0xFF);
+    } else {
+      while ((val >> 6) != -1) {
+        *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+        val >>= 7;
+      }
+      *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+    }
+  }
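+  // Example: write_i32v(-65) emits 0xBF 0x7F. The first group is
+  // (-65 & 0x7F) = 0x3F with the continuation bit set; after val >>= 7 the
+  // value is -1, whose sign-extended final group is 0x7F.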
+
+  // Write a 64-bit unsigned LEB to {dest}, updating {dest} to point after
+  // the last uint8_t written. No safety checks.
+  static void write_u64v(uint8_t** dest, uint64_t val) {
+    while (val >= 0x80) {
+      *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+      val >>= 7;
+    }
+    *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+  }
+
+  // Write a 64-bit signed LEB to {dest}, updating {dest} to point after
+  // the last uint8_t written. No safety checks.
+  static void write_i64v(uint8_t** dest, int64_t val) {
+    if (val >= 0) {
+      while (val >= 0x40) {  // prevent sign extension.
+        *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+        val >>= 7;
+      }
+      *((*dest)++) = static_cast<uint8_t>(val & 0xFF);
+    } else {
+      while ((val >> 6) != -1) {
+        *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+        val >>= 7;
+      }
+      *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+    }
+  }
+
+  // TODO(titzer): move core logic for decoding LEBs from decoder.h to here.
+
+  // Compute the size of {val} if emitted as an unsigned LEB32.
+  static inline size_t sizeof_u32v(size_t val) {
+    size_t size = 0;
+    do {
+      size++;
+      val = val >> 7;
+    } while (val > 0);
+    return size;
+  }
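+  // E.g. sizeof_u32v returns 1 for values up to 127, 2 for values up to
+  // 16383, and 5 for values at or above 2^28.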
+
+  // Compute the size of {val} if emitted as a signed LEB32.
+  static inline size_t sizeof_i32v(int32_t val) {
+    size_t size = 1;
+    if (val >= 0) {
+      while (val >= 0x40) {  // prevent sign extension.
+        size++;
+        val >>= 7;
+      }
+    } else {
+      while ((val >> 6) != -1) {
+        size++;
+        val >>= 7;
+      }
+    }
+    return size;
+  }
+
+  // Compute the size of {val} if emitted as an unsigned LEB64.
+  static inline size_t sizeof_u64v(uint64_t val) {
+    size_t size = 0;
+    do {
+      size++;
+      val = val >> 7;
+    } while (val > 0);
+    return size;
+  }
+
+  // Compute the size of {val} if emitted as a signed LEB64.
+  static inline size_t sizeof_i64v(int64_t val) {
+    size_t size = 1;
+    if (val >= 0) {
+      while (val >= 0x40) {  // prevent sign extension.
+        size++;
+        val >>= 7;
+      }
+    } else {
+      while ((val >> 6) != -1) {
+        size++;
+        val >>= 7;
+      }
+    }
+    return size;
+  }
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_LEB_HELPER_H_
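
The sizeof_* helpers above are the counterparts of the write_* helpers: a
caller can sum the encoded sizes to allocate a buffer, then stream the values
out through a cursor. A minimal standalone sketch of that pattern
(illustrative test code, not part of this patch):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  #include "src/wasm/leb-helper.h"

  void EncodeAll(const std::vector<uint32_t>& values) {
    using v8::internal::wasm::LEBHelper;
    size_t total = 0;
    for (uint32_t v : values) total += LEBHelper::sizeof_u32v(v);
    std::vector<uint8_t> buffer(total);
    uint8_t* cursor = buffer.data();
    for (uint32_t v : values) LEBHelper::write_u32v(&cursor, v);
    // Each write_u32v advances the cursor by exactly sizeof_u32v(v) bytes.
    assert(cursor == buffer.data() + total);
  }

This mirrors how WasmModuleWriter::WriteTo first computes section sizes with
sizeof_u32v and then emits the bytes.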
diff --git a/src/wasm/module-decoder.cc b/src/wasm/module-decoder.cc
index 3e85a1b..f7d26a5 100644
--- a/src/wasm/module-decoder.cc
+++ b/src/wasm/module-decoder.cc
@@ -25,7 +25,6 @@
 #define TRACE(...)
 #endif
 
-
 // The main logic for decoding the bytes of a module.
 class ModuleDecoder : public Decoder {
  public:
@@ -79,9 +78,8 @@
     module->mem_external = false;
     module->origin = origin_;
 
-    bool sections[(size_t)WasmSection::Code::Max] = {false};
-
     const byte* pos = pc_;
+    int current_order = 0;
     uint32_t magic_word = consume_u32("wasm magic");
 #define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
     if (magic_word != kWasmMagic) {
@@ -109,45 +107,45 @@
       TRACE("DecodeSection\n");
       pos = pc_;
 
-      int length;
-      uint32_t section_length = consume_u32v(&length, "section size");
-
-      int section_string_leb_length = 0;
-      uint32_t section_string_length = 0;
-      WasmSection::Code section = consume_section_name(
-          &section_string_leb_length, &section_string_length);
-      uint32_t string_and_leb_length =
-          section_string_leb_length + section_string_length;
-      if (string_and_leb_length > section_length) {
-        error(pos, pos,
-              "section string of size %u longer than total section bytes %u",
-              string_and_leb_length, section_length);
+      // Read the section name.
+      int string_leb_length = 0;
+      uint32_t string_length =
+          consume_u32v(&string_leb_length, "section name length");
+      const byte* section_name_start = pc_;
+      consume_bytes(string_length);
+      if (failed()) {
+        TRACE("Section name of length %u couldn't be read\n", string_length);
         break;
       }
 
-      if (section == WasmSection::Code::Max) {
-        // Skip unknown section.
-        uint32_t skip = section_length - string_and_leb_length;
-        TRACE("skipping %u bytes from unknown section\n", skip);
-        consume_bytes(skip);
-        continue;
-      }
+      WasmSection::Code section =
+          WasmSection::lookup(section_name_start, string_length);
 
-      // Each section should appear at most once.
-      CheckForPreviousSection(sections, section, false);
-      sections[(size_t)section] = true;
+      // Read and check the section size.
+      int section_leb_length = 0;
+      uint32_t section_length =
+          consume_u32v(&section_leb_length, "section length");
+      if (!checkAvailable(section_length)) {
+        // The section would extend beyond the end of the module.
+        break;
+      }
+      const byte* section_start = pc_;
+      const byte* expected_section_end = pc_ + section_length;
+
+      current_order = CheckSectionOrder(current_order, section);
 
       switch (section) {
         case WasmSection::Code::End:
           // Terminate section decoding.
           limit_ = pc_;
           break;
-        case WasmSection::Code::Memory:
+        case WasmSection::Code::Memory: {
           int length;
           module->min_mem_pages = consume_u32v(&length, "min memory");
           module->max_mem_pages = consume_u32v(&length, "max memory");
           module->mem_export = consume_u8("export memory") != 0;
           break;
+        }
         case WasmSection::Code::Signatures: {
           int length;
           uint32_t signatures_count = consume_u32v(&length, "signatures count");
@@ -157,30 +155,30 @@
             if (failed()) break;
             TRACE("DecodeSignature[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            FunctionSig* s = consume_sig();  // read function sig.
+            FunctionSig* s = consume_sig();
             module->signatures.push_back(s);
           }
           break;
         }
         case WasmSection::Code::FunctionSignatures: {
-          // Functions require a signature table first.
-          CheckForPreviousSection(sections, WasmSection::Code::Signatures,
-                                  true);
           int length;
           uint32_t functions_count = consume_u32v(&length, "functions count");
           module->functions.reserve(SafeReserve(functions_count));
           for (uint32_t i = 0; i < functions_count; i++) {
-            module->functions.push_back(
-                {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
+            module->functions.push_back({nullptr,  // sig
+                                         i,        // func_index
+                                         0,        // sig_index
+                                         0,        // name_offset
+                                         0,        // name_length
+                                         0,        // code_start_offset
+                                         0,        // code_end_offset
+                                         false});  // exported
             WasmFunction* function = &module->functions.back();
             function->sig_index = consume_sig_index(module, &function->sig);
           }
           break;
         }
         case WasmSection::Code::FunctionBodies: {
-          // Function bodies should follow signatures.
-          CheckForPreviousSection(sections,
-                                  WasmSection::Code::FunctionSignatures, true);
           int length;
           const byte* pos = pc_;
           uint32_t functions_count = consume_u32v(&length, "functions count");
@@ -206,10 +204,7 @@
           }
           break;
         }
-        case WasmSection::Code::Functions: {
-          // Functions require a signature table first.
-          CheckForPreviousSection(sections, WasmSection::Code::Signatures,
-                                  true);
+        case WasmSection::Code::OldFunctions: {
           int length;
           uint32_t functions_count = consume_u32v(&length, "functions count");
           module->functions.reserve(SafeReserve(functions_count));
@@ -224,8 +219,14 @@
             TRACE("DecodeFunction[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
 
-            module->functions.push_back(
-                {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
+            module->functions.push_back({nullptr,  // sig
+                                         i,        // func_index
+                                         0,        // sig_index
+                                         0,        // name_offset
+                                         0,        // name_length
+                                         0,        // code_start_offset
+                                         0,        // code_end_offset
+                                         false});  // exported
             WasmFunction* function = &module->functions.back();
             DecodeFunctionInModule(module, function, false);
           }
@@ -233,19 +234,15 @@
             for (uint32_t i = 0; i < functions_count; i++) {
               if (failed()) break;
               WasmFunction* function = &module->functions[i];
-              if (!function->external) {
-                VerifyFunctionBody(i, &menv, function);
-                if (result_.failed())
-                  error(result_.error_pc, result_.error_msg.get());
+              VerifyFunctionBody(i, &menv, function);
+              if (result_.failed()) {
+                error(result_.error_pc, result_.error_msg.get());
               }
             }
           }
           break;
         }
         case WasmSection::Code::Names: {
-          // Names correspond to functions.
-          CheckForPreviousSection(sections,
-                                  WasmSection::Code::FunctionSignatures, true);
           int length;
           const byte* pos = pc_;
           uint32_t functions_count = consume_u32v(&length, "functions count");
@@ -259,13 +256,13 @@
           for (uint32_t i = 0; i < functions_count; i++) {
             WasmFunction* function = &module->functions[i];
             function->name_offset =
-                consume_string(&function->name_length, "function name");
+                consume_string(&function->name_length, false);
 
             uint32_t local_names_count =
                 consume_u32v(&length, "local names count");
             for (uint32_t j = 0; j < local_names_count; j++) {
               uint32_t unused = 0;
-              uint32_t offset = consume_string(&unused, "local name");
+              uint32_t offset = consume_string(&unused, false);
               USE(unused);
               USE(offset);
             }
@@ -297,7 +294,10 @@
             if (failed()) break;
             TRACE("DecodeDataSegment[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            module->data_segments.push_back({0, 0, 0});
+            module->data_segments.push_back({0,        // dest_addr
+                                             0,        // source_offset
+                                             0,        // source_size
+                                             false});  // init
             WasmDataSegment* segment = &module->data_segments.back();
             DecodeDataSegmentInModule(module, segment);
           }
@@ -341,9 +341,6 @@
           break;
         }
         case WasmSection::Code::ImportTable: {
-          // Declares an import table.
-          CheckForPreviousSection(sections, WasmSection::Code::Signatures,
-                                  true);
           int length;
           uint32_t import_table_count =
               consume_u32v(&length, "import table count");
@@ -354,18 +351,23 @@
             TRACE("DecodeImportTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
 
-            module->import_table.push_back({nullptr, 0, 0});
+            module->import_table.push_back({nullptr,  // sig
+                                            0,        // sig_index
+                                            0,        // module_name_offset
+                                            0,        // module_name_length
+                                            0,        // function_name_offset
+                                            0});      // function_name_length
             WasmImport* import = &module->import_table.back();
 
             import->sig_index = consume_sig_index(module, &import->sig);
             const byte* pos = pc_;
-            import->module_name_offset = consume_string(
-                &import->module_name_length, "import module name");
+            import->module_name_offset =
+                consume_string(&import->module_name_length, true);
             if (import->module_name_length == 0) {
               error(pos, "import module name cannot be NULL");
             }
-            import->function_name_offset = consume_string(
-                &import->function_name_length, "import function name");
+            import->function_name_offset =
+                consume_string(&import->function_name_length, true);
           }
           break;
         }
@@ -382,17 +384,37 @@
             TRACE("DecodeExportTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
 
-            module->export_table.push_back({0, 0});
+            module->export_table.push_back({0,    // func_index
+                                            0,    // name_offset
+                                            0});  // name_length
             WasmExport* exp = &module->export_table.back();
 
             WasmFunction* func;
             exp->func_index = consume_func_index(module, &func);
-            exp->name_offset = consume_string(&exp->name_length, "export name");
+            exp->name_offset = consume_string(&exp->name_length, true);
           }
           break;
         }
         case WasmSection::Code::Max:
-          UNREACHABLE();  // Already skipped unknown sections.
+          // Skip unknown sections.
+          TRACE("Unknown section: '");
+          for (uint32_t i = 0; i != string_length; ++i) {
+            TRACE("%c", *(section_name_start + i));
+          }
+          TRACE("'\n");
+          consume_bytes(section_length);
+          break;
+      }
+
+      if (pc_ != expected_section_end) {
+        const char* diff = pc_ < expected_section_end ? "shorter" : "longer";
+        size_t expected_length = static_cast<size_t>(section_length);
+        size_t actual_length = static_cast<size_t>(pc_ - section_start);
+        error(pc_, pc_,
+              "section \"%s\" %s (%zu bytes) than specified (%zu bytes)",
+              WasmSection::getName(section), diff, actual_length,
+              expected_length);
+        break;
       }
     }
 
@@ -417,17 +439,18 @@
     }
   }
 
-  void CheckForPreviousSection(bool* sections, WasmSection::Code section,
-                               bool present) {
-    if (section >= WasmSection::Code::Max) return;
-    if (sections[(size_t)section] == present) return;
-    if (present) {
-      error(pc_ - 1, nullptr, "required %s section missing",
-            WasmSection::getName(section));
-    } else {
-      error(pc_ - 1, nullptr, "%s section already present",
+  int CheckSectionOrder(int current_order, WasmSection::Code section) {
+    int next_order = WasmSection::getOrder(section);
+    if (next_order == 0) return current_order;
+    if (next_order == current_order) {
+      error(pc_, pc_, "section \"%s\" already defined",
             WasmSection::getName(section));
     }
+    if (next_order < current_order) {
+      error(pc_, pc_, "section \"%s\" out of order",
+            WasmSection::getName(section));
+    }
+    return next_order;
   }
 
   // Decodes a single anonymous function starting at {start_}.
@@ -440,7 +463,6 @@
     function->code_start_offset = off(pc_);   // ---- code start
     function->code_end_offset = off(limit_);  // ---- code end
     function->exported = false;               // ---- exported
-    function->external = false;               // ---- external
 
     if (ok()) VerifyFunctionBody(0, module_env, function);
 
@@ -466,7 +488,9 @@
 
   // Decodes a single global entry inside a module starting at {pc_}.
   void DecodeGlobalInModule(WasmGlobal* global) {
-    global->name_offset = consume_string(&global->name_length, "global name");
+    global->name_offset = consume_string(&global->name_length, false);
+    DCHECK(unibrow::Utf8::Validate(start_ + global->name_offset,
+                                   global->name_length));
     global->type = mem_type();
     global->offset = 0;
     global->exported = consume_u8("exported") != 0;
@@ -487,32 +511,15 @@
       function->sig = module->signatures[function->sig_index];
     }
 
-    TRACE("  +%d  <function attributes:%s%s%s%s%s>\n",
-          static_cast<int>(pc_ - start_),
+    TRACE("  +%d  <function attributes:%s%s>\n", static_cast<int>(pc_ - start_),
           decl_bits & kDeclFunctionName ? " name" : "",
-          decl_bits & kDeclFunctionImport ? " imported" : "",
-          decl_bits & kDeclFunctionLocals ? " locals" : "",
-          decl_bits & kDeclFunctionExport ? " exported" : "",
-          (decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
-
-    if (decl_bits & kDeclFunctionName) {
-      function->name_offset =
-          consume_string(&function->name_length, "function name");
-    }
+          decl_bits & kDeclFunctionExport ? " exported" : "");
 
     function->exported = decl_bits & kDeclFunctionExport;
 
-    // Imported functions have no locals or body.
-    if (decl_bits & kDeclFunctionImport) {
-      function->external = true;
-      return;
-    }
-
-    if (decl_bits & kDeclFunctionLocals) {
-      function->local_i32_count = consume_u16("i32 count");
-      function->local_i64_count = consume_u16("i64 count");
-      function->local_f32_count = consume_u16("f32 count");
-      function->local_f64_count = consume_u16("f64 count");
+    if (decl_bits & kDeclFunctionName) {
+      function->name_offset =
+          consume_string(&function->name_length, function->exported);
     }
 
     uint16_t size = consume_u16("body size");
@@ -566,11 +573,10 @@
   // Verifies the body (code) of a given function.
   void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
                           WasmFunction* function) {
-    if (FLAG_trace_wasm_decode_time) {
+    if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
       OFStream os(stdout);
       os << "Verifying WASM function " << WasmFunctionName(function, menv)
          << std::endl;
-      os << std::endl;
     }
     FunctionBody body = {menv, function->sig, start_,
                          start_ + function->code_start_offset,
@@ -606,11 +612,14 @@
 
   // Reads a length-prefixed string, checking that it is within bounds. Returns
   // the offset of the string, and the length as an out parameter.
-  uint32_t consume_string(uint32_t* length, const char* name = nullptr) {
+  uint32_t consume_string(uint32_t* length, bool validate_utf8) {
     int varint_length;
     *length = consume_u32v(&varint_length, "string length");
     uint32_t offset = pc_offset();
     TRACE("  +%u  %-20s: (%u bytes)\n", offset, "string", *length);
+    if (validate_utf8 && !unibrow::Utf8::Validate(pc_, *length)) {
+      error(pc_, "no valid UTF-8 string");
+    }
     consume_bytes(*length);
     return offset;
   }
@@ -643,30 +652,6 @@
     return func_index;
   }
 
-  // Reads a section name.
-  WasmSection::Code consume_section_name(int* string_leb_length,
-                                         uint32_t* string_length) {
-    *string_length = consume_u32v(string_leb_length, "name length");
-    const byte* start = pc_;
-    consume_bytes(*string_length);
-    if (failed()) {
-      TRACE("Section name of length %u couldn't be read\n", *string_length);
-      return WasmSection::Code::Max;
-    }
-    // TODO(jfb) Linear search, it may be better to do a common-prefix search.
-    for (WasmSection::Code i = WasmSection::begin(); i != WasmSection::end();
-         i = WasmSection::next(i)) {
-      if (WasmSection::getNameLength(i) == *string_length &&
-          0 == memcmp(WasmSection::getName(i), start, *string_length)) {
-        return i;
-      }
-    }
-    TRACE("Unknown section: '");
-    for (uint32_t i = 0; i != *string_length; ++i) TRACE("%c", *(start + i));
-    TRACE("'\n");
-    return WasmSection::Code::Max;
-  }
-
   // Reads a single 8-bit integer, interpreting it as a local type.
   LocalType consume_local_type() {
     byte val = consume_u8("local type");
@@ -719,24 +704,51 @@
     }
   }
 
-  // Parses an inline function signature.
+  // Parses a type entry, which is currently limited to functions only.
   FunctionSig* consume_sig() {
+    const byte* pos = pc_;
+    byte form = consume_u8("type form");
+    if (form != kWasmFunctionTypeForm) {
+      error(pos, pos, "expected function type form (0x%02x), got: 0x%02x",
+            kWasmFunctionTypeForm, form);
+      return nullptr;
+    }
     int length;
-    byte count = consume_u32v(&length, "param count");
-    LocalType ret = consume_local_type();
-    FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
-    if (ret != kAstStmt) builder.AddReturn(ret);
-
-    for (int i = 0; i < count; i++) {
+    // parse parameter types
+    uint32_t param_count = consume_u32v(&length, "param count");
+    std::vector<LocalType> params;
+    for (uint32_t i = 0; i < param_count; i++) {
       LocalType param = consume_local_type();
       if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
-      builder.AddParam(param);
+      params.push_back(param);
     }
-    return builder.Build();
+
+    // parse return types
+    const byte* pt = pc_;
+    uint32_t return_count = consume_u32v(&length, "return count");
+    if (return_count > kMaxReturnCount) {
+      error(pt, pt, "return count of %u exceeds maximum of %u", return_count,
+            kMaxReturnCount);
+      return nullptr;
+    }
+    std::vector<LocalType> returns;
+    for (uint32_t i = 0; i < return_count; i++) {
+      LocalType ret = consume_local_type();
+      if (ret == kAstStmt) error(pc_ - 1, "invalid void return type");
+      returns.push_back(ret);
+    }
+
+    // FunctionSig stores the return types first.
+    LocalType* buffer =
+        module_zone->NewArray<LocalType>(param_count + return_count);
+    uint32_t b = 0;
+    for (uint32_t i = 0; i < return_count; i++) buffer[b++] = returns[i];
+    for (uint32_t i = 0; i < param_count; i++) buffer[b++] = params[i];
+
+    return new (module_zone) FunctionSig(return_count, param_count, buffer);
   }
 };
 
-
 // Helpers for nice error messages.
 class ModuleError : public ModuleResult {
  public:
@@ -750,7 +762,6 @@
   }
 };
 
-
 // Helpers for nice error messages.
 class FunctionError : public FunctionResult {
  public:
@@ -767,30 +778,42 @@
 ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
                               const byte* module_start, const byte* module_end,
                               bool verify_functions, ModuleOrigin origin) {
+  size_t decode_memory_start = zone->allocation_size();
+  HistogramTimerScope wasm_decode_module_time_scope(
+      isolate->counters()->wasm_decode_module_time());
   size_t size = module_end - module_start;
   if (module_start > module_end) return ModuleError("start > end");
   if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
+  // TODO(bradnelson): Improve histogram handling of size_t.
+  isolate->counters()->wasm_module_size_bytes()->AddSample(
+      static_cast<int>(size));
   WasmModule* module = new WasmModule();
   ModuleDecoder decoder(zone, module_start, module_end, origin);
-  return decoder.DecodeModule(module, verify_functions);
+  ModuleResult result = decoder.DecodeModule(module, verify_functions);
+  // TODO(bradnelson): Improve histogram handling of size_t.
+  isolate->counters()->wasm_decode_module_peak_memory_bytes()->AddSample(
+      static_cast<int>(zone->allocation_size() - decode_memory_start));
+  return result;
 }
 
-
 FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
                                            const byte* end) {
   ModuleDecoder decoder(zone, start, end, kWasmOrigin);
   return decoder.DecodeFunctionSignature(start);
 }
 
-
 FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
                                   ModuleEnv* module_env,
                                   const byte* function_start,
                                   const byte* function_end) {
+  HistogramTimerScope wasm_decode_function_time_scope(
+      isolate->counters()->wasm_decode_function_time());
   size_t size = function_end - function_start;
   if (function_start > function_end) return FunctionError("start > end");
   if (size > kMaxFunctionSize)
     return FunctionError("size > maximum function size");
+  isolate->counters()->wasm_function_size_bytes()->AddSample(
+      static_cast<int>(size));
   WasmFunction* function = new WasmFunction();
   ModuleDecoder decoder(zone, function_start, function_end, kWasmOrigin);
   return decoder.DecodeSingleFunction(module_env, function);
diff --git a/src/wasm/switch-logic.cc b/src/wasm/switch-logic.cc
new file mode 100644
index 0000000..f8e3f0d
--- /dev/null
+++ b/src/wasm/switch-logic.cc
@@ -0,0 +1,63 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/switch-logic.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+CaseNode* CreateBst(ZoneVector<CaseNode*>* nodes, size_t begin, size_t end) {
+  if (end < begin) {
+    return nullptr;
+  } else if (end == begin) {
+    return nodes->at(begin);
+  } else {
+    size_t root_index = (begin + end) / 2;
+    CaseNode* root = nodes->at(root_index);
+    if (root_index != 0) {
+      root->left = CreateBst(nodes, begin, root_index - 1);
+    }
+    root->right = CreateBst(nodes, root_index + 1, end);
+    return root;
+  }
+}
+}  // namespace
+
+CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone) {
+  const int max_distance = 2;
+  const int min_size = 4;
+  if (cases->empty()) {
+    return nullptr;
+  }
+  std::sort(cases->begin(), cases->end());
+  ZoneVector<size_t> table_breaks(zone);
+  for (size_t i = 1; i < cases->size(); i++) {
+    if (cases->at(i) - cases->at(i - 1) > max_distance) {
+      table_breaks.push_back(i);
+    }
+  }
+  table_breaks.push_back(cases->size());
+  ZoneVector<CaseNode*> nodes(zone);
+  size_t curr_pos = 0;
+  for (size_t i = 0; i < table_breaks.size(); i++) {
+    size_t break_pos = table_breaks[i];
+    if (break_pos - curr_pos >= min_size) {
+      int begin = cases->at(curr_pos);
+      int end = cases->at(break_pos - 1);
+      nodes.push_back(new (zone) CaseNode(begin, end));
+      curr_pos = break_pos;
+    } else {
+      for (; curr_pos < break_pos; curr_pos++) {
+        nodes.push_back(new (zone)
+                            CaseNode(cases->at(curr_pos), cases->at(curr_pos)));
+      }
+    }
+  }
+  return CreateBst(&nodes, 0, nodes.size() - 1);
+}
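+
+// Example: for the cases {1, 2, 3, 4, 99}, the only gap wider than
+// max_distance lies between 4 and 99, so the run 1..4 (length >= min_size)
+// collapses into a single CaseNode(1, 4) while 99 remains a singleton;
+// CreateBst then returns [1,4] as the root with [99,99] as its right child.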
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/src/wasm/switch-logic.h b/src/wasm/switch-logic.h
new file mode 100644
index 0000000..8cef08b
--- /dev/null
+++ b/src/wasm/switch-logic.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_SWITCH_LOGIC_H
+#define V8_WASM_SWITCH_LOGIC_H
+
+#include "src/zone-containers.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+struct CaseNode : public ZoneObject {
+  const int begin;
+  const int end;
+  CaseNode* left;
+  CaseNode* right;
+  CaseNode(int begin, int end) : begin(begin), end(end) {
+    left = nullptr;
+    right = nullptr;
+  }
+};
+
+CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_SWITCH_LOGIC_H
diff --git a/src/wasm/wasm-external-refs.cc b/src/wasm/wasm-external-refs.cc
new file mode 100644
index 0000000..e155f3c
--- /dev/null
+++ b/src/wasm/wasm-external-refs.cc
@@ -0,0 +1,199 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <limits>
+
+#include "include/v8config.h"
+
+#include "src/base/bits.h"
+#include "src/wasm/wasm-external-refs.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+
+void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+
+void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+
+void f32_nearest_int_wrapper(float* param) { *param = nearbyintf(*param); }
+
+void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+
+void f64_floor_wrapper(double* param) { *param = floor(*param); }
+
+void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+
+void f64_nearest_int_wrapper(double* param) { *param = nearbyint(*param); }
+
+void int64_to_float32_wrapper(int64_t* input, float* output) {
+  *output = static_cast<float>(*input);
+}
+
+void uint64_to_float32_wrapper(uint64_t* input, float* output) {
+#if V8_CC_MSVC
+  // With MSVC we use static_cast<float>(uint32_t) instead of
+  // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
+  // semantics. The idea is to calculate
+  // static_cast<float>(high_word) * 2^32 + static_cast<float>(low_word). To
+  // achieve proper rounding in all cases we have to adjust the high_word
+  // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
+  // the high_word if the low_word may affect the rounding of the high_word.
+  uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+  float shift = static_cast<float>(1ull << 32);
+  // If the MSB of the high_word is clear, shift the high_word left by one to
+  // free its LSB for the rounding bit, compensating with a smaller shift.
+  if (high_word < 0x80000000) {
+    high_word <<= 1;
+    shift = static_cast<float>(1ull << 31);
+  }
+
+  if ((high_word & 0xfe000000) && low_word) {
+    // Set the rounding bit.
+    high_word |= 1;
+  }
+
+  float result = static_cast<float>(high_word);
+  result *= shift;
+  result += static_cast<float>(low_word);
+  *output = result;
+
+#else
+  *output = static_cast<float>(*input);
+#endif
+}
+
+void int64_to_float64_wrapper(int64_t* input, double* output) {
+  *output = static_cast<double>(*input);
+}
+
+void uint64_to_float64_wrapper(uint64_t* input, double* output) {
+#if V8_CC_MSVC
+  // With MSVC we use static_cast<double>(uint32_t) instead of
+  // static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
+  // semantics. The idea is to calculate
+  // static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
+  uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+  double shift = static_cast<double>(1ull << 32);
+
+  double result = static_cast<double>(high_word);
+  result *= shift;
+  result += static_cast<double>(low_word);
+  *output = result;
+
+#else
+  *output = static_cast<double>(*input);
+#endif
+}
+
+int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within int64 range which are actually
+  // not within int64 range.
+  if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
+      *input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+    *output = static_cast<int64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within uint64 range which are actually
+  // not within uint64 range.
+  if (*input > -1.0 &&
+      *input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+    *output = static_cast<uint64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within int64 range which are actually
+  // not within int64 range.
+  if (*input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
+      *input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+    *output = static_cast<int64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
+  // We use "<" here to check the upper bound because of rounding problems: With
+  // "<=" some inputs would be considered within uint64 range which are actually
+  // not within uint64 range.
+  if (*input > -1.0 &&
+      *input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+    *output = static_cast<uint64_t>(*input);
+    return 1;
+  }
+  return 0;
+}
+
+int32_t int64_div_wrapper(int64_t* dst, int64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  if (*src == -1 && *dst == std::numeric_limits<int64_t>::min()) {
+    return -1;
+  }
+  *dst /= *src;
+  return 1;
+}
+
+int32_t int64_mod_wrapper(int64_t* dst, int64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  *dst %= *src;
+  return 1;
+}
+
+int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  *dst /= *src;
+  return 1;
+}
+
+int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
+  if (*src == 0) {
+    return 0;
+  }
+  *dst %= *src;
+  return 1;
+}
+
+uint32_t word32_ctz_wrapper(uint32_t* input) {
+  return static_cast<uint32_t>(base::bits::CountTrailingZeros32(*input));
+}
+
+uint32_t word64_ctz_wrapper(uint64_t* input) {
+  return static_cast<uint32_t>(base::bits::CountTrailingZeros64(*input));
+}
+
+uint32_t word32_popcnt_wrapper(uint32_t* input) {
+  return static_cast<uint32_t>(base::bits::CountPopulation(*input));
+}
+
+uint32_t word64_popcnt_wrapper(uint64_t* input) {
+  return static_cast<uint32_t>(base::bits::CountPopulation(*input));
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
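
These wrappers pass operands and results through pointers and report success
as an int32_t status rather than using native 64-bit return values, which
keeps the call interface uniform for generated code (and is presumably
friendlier to 32-bit targets). A small illustrative caller, not part of this
patch:

  #include <cassert>
  #include <cstdint>

  #include "src/wasm/wasm-external-refs.h"

  int main() {
    using namespace v8::internal::wasm;
    int64_t out = 0;
    double too_big = 1e30;  // far outside int64 range: conversion must fail
    assert(float64_to_int64_wrapper(&too_big, &out) == 0);
    double ok = 42.5;  // in range: truncates toward zero
    assert(float64_to_int64_wrapper(&ok, &out) == 1 && out == 42);
    int64_t dst = INT64_MIN, src = -1;  // overflowing division is rejected
    assert(int64_div_wrapper(&dst, &src) == -1);
    return 0;
  }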
diff --git a/src/wasm/wasm-external-refs.h b/src/wasm/wasm-external-refs.h
index 4aa452b..ac938d6 100644
--- a/src/wasm/wasm-external-refs.h
+++ b/src/wasm/wasm-external-refs.h
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <stdint.h>
+
 #ifndef WASM_EXTERNAL_REFS_H
 #define WASM_EXTERNAL_REFS_H
 
@@ -9,173 +11,54 @@
 namespace internal {
 namespace wasm {
 
-static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+void f32_trunc_wrapper(float* param);
 
-static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+void f32_floor_wrapper(float* param);
 
-static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+void f32_ceil_wrapper(float* param);
 
-static void f32_nearest_int_wrapper(float* param) {
-  *param = nearbyintf(*param);
-}
+void f32_nearest_int_wrapper(float* param);
 
-static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+void f64_trunc_wrapper(double* param);
 
-static void f64_floor_wrapper(double* param) { *param = floor(*param); }
+void f64_floor_wrapper(double* param);
 
-static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+void f64_ceil_wrapper(double* param);
 
-static void f64_nearest_int_wrapper(double* param) {
-  *param = nearbyint(*param);
-}
+void f64_nearest_int_wrapper(double* param);
 
-static void int64_to_float32_wrapper(int64_t* input, float* output) {
-  *output = static_cast<float>(*input);
-}
+void int64_to_float32_wrapper(int64_t* input, float* output);
 
-static void uint64_to_float32_wrapper(uint64_t* input, float* output) {
-#if V8_CC_MSVC
-  // With MSVC we use static_cast<float>(uint32_t) instead of
-  // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
-  // semantics. The idea is to calculate
-  // static_cast<float>(high_word) * 2^32 + static_cast<float>(low_word). To
-  // achieve proper rounding in all cases we have to adjust the high_word
-  // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
-  // the high_word if the low_word may affect the rounding of the high_word.
-  uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
-  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+void uint64_to_float32_wrapper(uint64_t* input, float* output);
 
-  float shift = static_cast<float>(1ull << 32);
-  // If the MSB of the high_word is set, then we make space for a rounding bit.
-  if (high_word < 0x80000000) {
-    high_word <<= 1;
-    shift = static_cast<float>(1ull << 31);
-  }
+void int64_to_float64_wrapper(int64_t* input, double* output);
 
-  if ((high_word & 0xfe000000) && low_word) {
-    // Set the rounding bit.
-    high_word |= 1;
-  }
+void uint64_to_float64_wrapper(uint64_t* input, double* output);
 
-  float result = static_cast<float>(high_word);
-  result *= shift;
-  result += static_cast<float>(low_word);
-  *output = result;
+int32_t float32_to_int64_wrapper(float* input, int64_t* output);
 
-#else
-  *output = static_cast<float>(*input);
-#endif
-}
+int32_t float32_to_uint64_wrapper(float* input, uint64_t* output);
 
-static void int64_to_float64_wrapper(int64_t* input, double* output) {
-  *output = static_cast<double>(*input);
-}
+int32_t float64_to_int64_wrapper(double* input, int64_t* output);
 
-static void uint64_to_float64_wrapper(uint64_t* input, double* output) {
-#if V8_CC_MSVC
-  // With MSVC we use static_cast<double>(uint32_t) instead of
-  // static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
-  // semantics. The idea is to calculate
-  // static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
-  uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
-  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+int32_t float64_to_uint64_wrapper(double* input, uint64_t* output);
 
-  double shift = static_cast<double>(1ull << 32);
+int32_t int64_div_wrapper(int64_t* dst, int64_t* src);
 
-  double result = static_cast<double>(high_word);
-  result *= shift;
-  result += static_cast<double>(low_word);
-  *output = result;
+int32_t int64_mod_wrapper(int64_t* dst, int64_t* src);
 
-#else
-  *output = static_cast<double>(*input);
-#endif
-}
+int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src);
 
-static int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
-  // We use "<" here to check the upper bound because of rounding problems: With
-  // "<=" some inputs would be considered within int64 range which are actually
-  // not within int64 range.
-  if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
-      *input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
-    *output = static_cast<int64_t>(*input);
-    return 1;
-  }
-  return 0;
-}
+int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src);
 
-static int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
-  // We use "<" here to check the upper bound because of rounding problems: With
-  // "<=" some inputs would be considered within uint64 range which are actually
-  // not within uint64 range.
-  if (*input > -1.0 &&
-      *input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
-    *output = static_cast<uint64_t>(*input);
-    return 1;
-  }
-  return 0;
-}
+uint32_t word32_ctz_wrapper(uint32_t* input);
 
-static int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
-  // We use "<" here to check the upper bound because of rounding problems: With
-  // "<=" some inputs would be considered within int64 range which are actually
-  // not within int64 range.
-  if (*input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
-      *input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
-    *output = static_cast<int64_t>(*input);
-    return 1;
-  }
-  return 0;
-}
+uint32_t word64_ctz_wrapper(uint64_t* input);
 
-static int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
-  // We use "<" here to check the upper bound because of rounding problems: With
-  // "<=" some inputs would be considered within uint64 range which are actually
-  // not within uint64 range.
-  if (*input > -1.0 &&
-      *input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
-    *output = static_cast<uint64_t>(*input);
-    return 1;
-  }
-  return 0;
-}
+uint32_t word32_popcnt_wrapper(uint32_t* input);
 
-static int32_t int64_div_wrapper(int64_t* dst, int64_t* src) {
-  if (*src == 0) {
-    return 0;
-  }
-  if (*src == -1 && *dst == std::numeric_limits<int64_t>::min()) {
-    return -1;
-  }
-  *dst /= *src;
-  return 1;
-}
-
-static int32_t int64_mod_wrapper(int64_t* dst, int64_t* src) {
-  if (*src == 0) {
-    return 0;
-  }
-  *dst %= *src;
-  return 1;
-}
-
-static int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src) {
-  if (*src == 0) {
-    return 0;
-  }
-  *dst /= *src;
-  return 1;
-}
-
-static int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
-  if (*src == 0) {
-    return 0;
-  }
-  *dst %= *src;
-  return 1;
-}
+uint32_t word64_popcnt_wrapper(uint64_t* input);
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
-
 #endif
diff --git a/src/wasm/wasm-function-name-table.cc b/src/wasm/wasm-function-name-table.cc
new file mode 100644
index 0000000..f082704
--- /dev/null
+++ b/src/wasm/wasm-function-name-table.cc
@@ -0,0 +1,74 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-function-name-table.h"
+
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Build an array with all function names. If there are N functions in the
+// module, then the first (kIntSize * (N+1)) bytes are integer entries.
+// The first integer entry encodes the number of functions in the module.
+// The entries 1 to N contain offsets into the second part of this array.
+// After these N+1 integer entries, the second part begins, which holds a
+// concatenation of all function names.
+//
+// Returns undefined if the total array length would not fit in an int.
+Handle<Object> BuildFunctionNamesTable(Isolate* isolate, WasmModule* module) {
+  uint64_t func_names_length = 0;
+  for (auto& func : module->functions) func_names_length += func.name_length;
+  int num_funcs_int = static_cast<int>(module->functions.size());
+  int current_offset = (num_funcs_int + 1) * kIntSize;
+  uint64_t total_array_length = current_offset + func_names_length;
+  int total_array_length_int = static_cast<int>(total_array_length);
+  // Check for overflow. Just skip function names if it happens.
+  if (total_array_length_int != total_array_length || num_funcs_int < 0 ||
+      num_funcs_int != module->functions.size())
+    return isolate->factory()->undefined_value();
+  Handle<ByteArray> func_names_array =
+      isolate->factory()->NewByteArray(total_array_length_int, TENURED);
+  if (func_names_array.is_null()) return isolate->factory()->undefined_value();
+  func_names_array->set_int(0, num_funcs_int);
+  int func_index = 0;
+  for (WasmFunction& fun : module->functions) {
+    WasmName name = module->GetNameOrNull(&fun);
+    func_names_array->copy_in(current_offset,
+                              reinterpret_cast<const byte*>(name.start()),
+                              name.length());
+    func_names_array->set_int(func_index + 1, current_offset);
+    current_offset += name.length();
+    ++func_index;
+  }
+  return func_names_array;
+}
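+
+// Example layout, assuming kIntSize == 4: for two functions named "add" and
+// "mul" the array is 18 bytes long; the ints {2, 12, 15} occupy bytes 0..11
+// and bytes 12..17 hold the concatenated names "addmul".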
+
+Handle<Object> GetWasmFunctionNameFromTable(Handle<ByteArray> func_names_array,
+                                            uint32_t func_index) {
+  uint32_t num_funcs = static_cast<uint32_t>(func_names_array->get_int(0));
+  DCHECK(static_cast<int>(num_funcs) >= 0);
+  auto undefined = [&func_names_array]() -> Handle<Object> {
+    return func_names_array->GetIsolate()->factory()->undefined_value();
+  };
+  if (func_index >= num_funcs) return undefined();
+  int offset = func_names_array->get_int(func_index + 1);
+  int next_offset = func_index == num_funcs - 1
+                        ? func_names_array->length()
+                        : func_names_array->get_int(func_index + 2);
+  ScopedVector<byte> buffer(next_offset - offset);
+  func_names_array->copy_out(offset, buffer.start(), next_offset - offset);
+  if (!unibrow::Utf8::Validate(buffer.start(), buffer.length())) {
+    return undefined();
+  }
+  MaybeHandle<Object> maybe_name =
+      func_names_array->GetIsolate()->factory()->NewStringFromUtf8(
+          Vector<const char>::cast(buffer));
+  return maybe_name.is_null() ? undefined() : maybe_name.ToHandleChecked();
+}
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
diff --git a/src/wasm/wasm-function-name-table.h b/src/wasm/wasm-function-name-table.h
new file mode 100644
index 0000000..1a71372
--- /dev/null
+++ b/src/wasm/wasm-function-name-table.h
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_FUNCTION_NAME_TABLE_H_
+#define V8_WASM_FUNCTION_NAME_TABLE_H_
+
+#include "src/handles.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Forward declarations for some WASM data structures.
+struct WasmModule;
+
+// Encode all function names of the WasmModule into one ByteArray.
+Handle<Object> BuildFunctionNamesTable(Isolate* isolate, WasmModule* module);
+
+// Extract the function name for the given func_index from the wasm module.
+// Returns undefined if the function index is invalid.
+Handle<Object> GetWasmFunctionNameFromTable(Handle<ByteArray> wasm_names_table,
+                                            uint32_t func_index);
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_FUNCTION_NAME_TABLE_H_
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index 83009d7..8a4b2ff 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -34,7 +34,6 @@
   size_t size() { return static_cast<size_t>(end - start); }
 };
 
-
 RawBuffer GetRawBufferArgument(
     ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
   if (args.Length() < 1) {
@@ -77,7 +76,6 @@
   return {start, end};
 }
 
-
 void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
@@ -98,7 +96,6 @@
   if (result.val) delete result.val;
 }
 
-
 void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
@@ -134,6 +131,11 @@
     return nullptr;
   }
 
+  if (info->scope()->declarations()->length() == 0) {
+    thrower->Error("Asm.js validation failed: no declarations in scope");
+    return nullptr;
+  }
+
   info->set_literal(
       info->scope()->declarations()->at(0)->AsFunctionDeclaration()->fun());
 
@@ -180,10 +182,10 @@
     thrower->Failed("", result);
   } else {
     // Success. Instantiate the module and return the object.
-    i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+    i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
     if (args.Length() > 1 && args[1]->IsObject()) {
       Local<Object> obj = Local<Object>::Cast(args[1]);
-      ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
+      ffi = i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
     }
 
     i::MaybeHandle<i::JSObject> object =
@@ -197,7 +199,6 @@
   if (result.val) delete result.val;
 }
 
-
 void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
@@ -229,7 +230,6 @@
                           internal::wasm::kAsmJsOrigin);
 }
 
-
 void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
@@ -243,7 +243,6 @@
 }
 }  // namespace
 
-
 // TODO(titzer): we use the API to create the function template because the
 // internal guts are too ugly to replicate here.
 static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
@@ -253,13 +252,11 @@
   return v8::Utils::OpenHandle(*local);
 }
 
-
 namespace internal {
 static Handle<String> v8_str(Isolate* isolate, const char* str) {
   return isolate->factory()->NewStringFromAsciiChecked(str);
 }
 
-
 static void InstallFunc(Isolate* isolate, Handle<JSObject> object,
                         const char* str, FunctionCallback func) {
   Handle<String> name = v8_str(isolate, str);
@@ -271,7 +268,6 @@
   JSObject::AddProperty(object, name, function, attributes);
 }
 
-
 void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
   // Setup wasm function map.
   Handle<Context> context(global->native_context(), isolate);
@@ -294,8 +290,16 @@
   InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
   InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
               InstantiateModuleFromAsm);
-}
 
+  {
+    // Add the Wasm.experimentalVersion property.
+    Handle<String> name = v8_str(isolate, "experimentalVersion");
+    PropertyAttributes attributes =
+        static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+    Handle<Smi> value = Handle<Smi>(Smi::FromInt(wasm::kWasmVersion), isolate);
+    JSObject::AddProperty(wasm_object, name, value, attributes);
+  }
+}
 
 void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
   if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
diff --git a/src/wasm/wasm-macro-gen.h b/src/wasm/wasm-macro-gen.h
index d9199e8..83ac86a 100644
--- a/src/wasm/wasm-macro-gen.h
+++ b/src/wasm/wasm-macro-gen.h
@@ -7,6 +7,8 @@
 
 #include "src/wasm/wasm-opcodes.h"
 
+#include "src/zone-containers.h"
+
 #define U32_LE(v)                                    \
   static_cast<byte>(v), static_cast<byte>((v) >> 8), \
       static_cast<byte>((v) >> 16), static_cast<byte>((v) >> 24)
@@ -58,27 +60,38 @@
 //------------------------------------------------------------------------------
 #define WASM_NOP kExprNop
 
-#define WASM_BLOCK(count, ...) kExprBlock, static_cast<byte>(count), __VA_ARGS__
-#define WASM_INFINITE_LOOP kExprLoop, 1, kExprBr, 0, kExprNop
-#define WASM_LOOP(count, ...) kExprLoop, static_cast<byte>(count), __VA_ARGS__
-#define WASM_IF(cond, tstmt) kExprIf, cond, tstmt
-#define WASM_IF_ELSE(cond, tstmt, fstmt) kExprIfElse, cond, tstmt, fstmt
-#define WASM_SELECT(cond, tval, fval) kExprSelect, cond, tval, fval
-#define WASM_BR(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define ARITY_0 0
+#define ARITY_1 1
+#define DEPTH_0 0
+#define DEPTH_1 1
+
+#define WASM_BLOCK(count, ...) kExprBlock, __VA_ARGS__, kExprEnd
+#define WASM_INFINITE_LOOP kExprLoop, kExprBr, ARITY_0, DEPTH_0, kExprEnd
+#define WASM_LOOP(count, ...) kExprLoop, __VA_ARGS__, kExprEnd
+#define WASM_IF(cond, tstmt) cond, kExprIf, tstmt, kExprEnd
+#define WASM_IF_ELSE(cond, tstmt, fstmt) \
+  cond, kExprIf, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
+#define WASM_BR(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
 #define WASM_BR_IF(depth, cond) \
-  kExprBrIf, static_cast<byte>(depth), kExprNop, cond
-#define WASM_BRV(depth, val) kExprBr, static_cast<byte>(depth), val
+  cond, kExprBrIf, ARITY_0, static_cast<byte>(depth)
+#define WASM_BRV(depth, val) val, kExprBr, ARITY_1, static_cast<byte>(depth)
 #define WASM_BRV_IF(depth, val, cond) \
-  kExprBrIf, static_cast<byte>(depth), val, cond
-#define WASM_BREAK(depth) kExprBr, static_cast<byte>(depth + 1), kExprNop
-#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth), kExprNop
-#define WASM_BREAKV(depth, val) kExprBr, static_cast<byte>(depth + 1), val
-#define WASM_RETURN0 kExprReturn
-#define WASM_RETURN(...) kExprReturn, __VA_ARGS__
+  val, cond, kExprBrIf, ARITY_1, static_cast<byte>(depth)
+#define WASM_BREAK(depth) kExprBr, ARITY_0, static_cast<byte>(depth + 1)
+#define WASM_CONTINUE(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
+#define WASM_BREAKV(depth, val) \
+  val, kExprBr, ARITY_1, static_cast<byte>(depth + 1)
+#define WASM_RETURN0 kExprReturn, ARITY_0
+#define WASM_RETURN1(val) val, kExprReturn, ARITY_1
+#define WASM_RETURNN(count, ...) __VA_ARGS__, kExprReturn, count
 #define WASM_UNREACHABLE kExprUnreachable
 
 #define WASM_BR_TABLE(key, count, ...) \
-  kExprBrTable, U32V_1(count), __VA_ARGS__, key
+  key, kExprBrTable, ARITY_0, U32V_1(count), __VA_ARGS__
+
+#define WASM_BR_TABLEV(val, key, count, ...) \
+  val, key, kExprBrTable, ARITY_1, U32V_1(count), __VA_ARGS__
 
 #define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
 #define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
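The macro rewrites in the preceding hunk and the ones below reflect the move to a postorder, stack-machine encoding: operands are now emitted before their opcode, and branches and calls carry an explicit arity byte (see the ARITY_*/DEPTH_* constants above). A hedged expansion example, assuming the WASM_I8 helper (kExprI8Const plus its immediate) defined elsewhere in this header:

// Illustrative only: postorder encoding of the expression 3 + 4, using the
// WASM_I32_ADD macro as redefined below.
static const byte kAddExpr[] = {
    WASM_I32_ADD(WASM_I8(3), WASM_I8(4)),
    // expands to: kExprI8Const, 3, kExprI8Const, 4, kExprI32Add
};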
@@ -119,8 +132,12 @@
 
 // A helper for encoding local declarations prepended to the body of a
 // function.
+// TODO(titzer): move this to an appropriate header.
 class LocalDeclEncoder {
  public:
+  explicit LocalDeclEncoder(Zone* zone, FunctionSig* s = nullptr)
+      : sig(s), local_decls(zone), total(0) {}
+
   // Prepend local declarations by creating a new buffer and copying data
   // over. The new buffer must be delete[]'d by the caller.
   void Prepend(const byte** start, const byte** end) const {
@@ -146,19 +163,16 @@
 
   // Add locals declarations to this helper. Return the index of the newly added
   // local(s), with an optional adjustment for the parameters.
-  uint32_t AddLocals(uint32_t count, LocalType type,
-                     FunctionSig* sig = nullptr) {
-    if (count == 0) {
-      return static_cast<uint32_t>((sig ? sig->parameter_count() : 0) +
-                                   local_decls.size());
-    }
-    size_t pos = local_decls.size();
+  uint32_t AddLocals(uint32_t count, LocalType type) {
+    uint32_t result =
+        static_cast<uint32_t>(total + (sig ? sig->parameter_count() : 0));
+    total += count;
     if (local_decls.size() > 0 && local_decls.back().second == type) {
       count += local_decls.back().first;
       local_decls.pop_back();
     }
     local_decls.push_back(std::pair<uint32_t, LocalType>(count, type));
-    return static_cast<uint32_t>(pos + (sig ? sig->parameter_count() : 0));
+    return result;
   }
 
   size_t Size() const {
@@ -167,8 +181,14 @@
     return size;
   }
 
+  bool has_sig() const { return sig != nullptr; }
+  FunctionSig* get_sig() const { return sig; }
+  void set_sig(FunctionSig* s) { sig = s; }
+
  private:
-  std::vector<std::pair<uint32_t, LocalType>> local_decls;
+  FunctionSig* sig;
+  ZoneVector<std::pair<uint32_t, LocalType>> local_decls;
+  size_t total;
 
   size_t SizeofUint32v(uint32_t val) const {
     size_t size = 1;
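A hedged usage sketch of the reworked encoder (the zone, sig, and kAstI32 names are caller-side assumptions, not part of this hunk): AddLocals now derives the returned index from the running total, so merging adjacent runs of the same type cannot disturb indices handed out earlier:

// Illustrative usage only, assuming a caller-provided Zone* zone and a
// FunctionSig* sig with two parameters.
LocalDeclEncoder decls(zone, sig);
uint32_t x = decls.AddLocals(1, kAstI32);  // 2 (params) + 0
uint32_t y = decls.AddLocals(3, kAstI32);  // 2 + 1; runs merge into one (4, i32) entry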
@@ -322,193 +342,251 @@
       static_cast<byte>(bit_cast<uint64_t>(val) >> 48),      \
       static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
 #define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
-#define WASM_SET_LOCAL(index, val) kExprSetLocal, static_cast<byte>(index), val
+#define WASM_SET_LOCAL(index, val) val, kExprSetLocal, static_cast<byte>(index)
 #define WASM_LOAD_GLOBAL(index) kExprLoadGlobal, static_cast<byte>(index)
 #define WASM_STORE_GLOBAL(index, val) \
-  kExprStoreGlobal, static_cast<byte>(index), val
-#define WASM_LOAD_MEM(type, index)                                      \
-  static_cast<byte>(                                                    \
-      v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
-      ZERO_ALIGNMENT, ZERO_OFFSET, index
-#define WASM_STORE_MEM(type, index, val)                               \
-  static_cast<byte>(                                                   \
-      v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
-      ZERO_ALIGNMENT, ZERO_OFFSET, index, val
-#define WASM_LOAD_MEM_OFFSET(type, offset, index)                       \
-  static_cast<byte>(                                                    \
-      v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
-      ZERO_ALIGNMENT, U32V_1(offset), index
-#define WASM_STORE_MEM_OFFSET(type, offset, index, val)                \
-  static_cast<byte>(                                                   \
-      v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
-      ZERO_ALIGNMENT, U32V_1(offset), index, val
-#define WASM_CALL_FUNCTION(index, ...) \
-  kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
-#define WASM_CALL_IMPORT(index, ...) \
-  kExprCallImport, static_cast<byte>(index), __VA_ARGS__
-#define WASM_CALL_INDIRECT(index, func, ...) \
-  kExprCallIndirect, static_cast<byte>(index), func, __VA_ARGS__
-#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
-#define WASM_CALL_IMPORT0(index) kExprCallImport, static_cast<byte>(index)
+  val, kExprStoreGlobal, static_cast<byte>(index)
+#define WASM_LOAD_MEM(type, index)                                             \
+  index, static_cast<byte>(                                                    \
+             v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+      ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_STORE_MEM(type, index, val)                                   \
+  index, val,                                                              \
+      static_cast<byte>(                                                   \
+          v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+      ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_LOAD_MEM_OFFSET(type, offset, index)                              \
+  index, static_cast<byte>(                                                    \
+             v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+      ZERO_ALIGNMENT, static_cast<byte>(offset)
+#define WASM_STORE_MEM_OFFSET(type, offset, index, val)                    \
+  index, val,                                                              \
+      static_cast<byte>(                                                   \
+          v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+      ZERO_ALIGNMENT, static_cast<byte>(offset)
+
+#define WASM_CALL_FUNCTION0(index) \
+  kExprCallFunction, 0, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION1(index, a) \
+  a, kExprCallFunction, 1, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION2(index, a, b) \
+  a, b, kExprCallFunction, 2, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION3(index, a, b, c) \
+  a, b, c, kExprCallFunction, 3, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION4(index, a, b, c, d) \
+  a, b, c, d, kExprCallFunction, 4, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION5(index, a, b, c, d, e) \
+  a, b, c, d, e, kExprCallFunction, 5, static_cast<byte>(index)
+#define WASM_CALL_FUNCTIONN(arity, index, ...) \
+  __VA_ARGS__, kExprCallFunction, arity, static_cast<byte>(index)
+
+#define WASM_CALL_IMPORT0(index) kExprCallImport, 0, static_cast<byte>(index)
+#define WASM_CALL_IMPORT1(index, a) \
+  a, kExprCallImport, 1, static_cast<byte>(index)
+#define WASM_CALL_IMPORT2(index, a, b) \
+  a, b, kExprCallImport, 2, static_cast<byte>(index)
+#define WASM_CALL_IMPORT3(index, a, b, c) \
+  a, b, c, kExprCallImport, 3, static_cast<byte>(index)
+#define WASM_CALL_IMPORT4(index, a, b, c, d) \
+  a, b, c, d, kExprCallImport, 4, static_cast<byte>(index)
+#define WASM_CALL_IMPORT5(index, a, b, c, d, e) \
+  a, b, c, d, e, kExprCallImport, 5, static_cast<byte>(index)
+#define WASM_CALL_IMPORTN(arity, index, ...) \
+  __VA_ARGS__, kExprCallImport, U32V_1(arity), static_cast<byte>(index)
+
 #define WASM_CALL_INDIRECT0(index, func) \
-  kExprCallIndirect, static_cast<byte>(index), func
-#define WASM_NOT(x) kExprI32Eqz, x
+  func, kExprCallIndirect, 0, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT1(index, func, a) \
+  func, a, kExprCallIndirect, 1, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT2(index, func, a, b) \
+  func, a, b, kExprCallIndirect, 2, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT3(index, func, a, b, c) \
+  func, a, b, c, kExprCallIndirect, 3, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT4(index, func, a, b, c, d) \
+  func, a, b, c, d, kExprCallIndirect, 4, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT5(index, func, a, b, c, d, e) \
+  func, a, b, c, d, e, kExprCallIndirect, 5, static_cast<byte>(index)
+#define WASM_CALL_INDIRECTN(arity, index, func, ...) \
+  func, __VA_ARGS__, kExprCallIndirect, U32V_1(arity), static_cast<byte>(index)
+
+#define WASM_NOT(x) x, kExprI32Eqz
+#define WASM_SEQ(...) __VA_ARGS__
 
 //------------------------------------------------------------------------------
 // Constructs that are composed of multiple bytecodes.
 //------------------------------------------------------------------------------
-#define WASM_WHILE(x, y) kExprLoop, 1, kExprIf, x, kExprBr, 0, y
-#define WASM_INC_LOCAL(index)                                          \
-  kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
-      static_cast<byte>(index), kExprI8Const, 1
-#define WASM_INC_LOCAL_BY(index, count)                                \
-  kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
-      static_cast<byte>(index), kExprI8Const, static_cast<int8_t>(count)
-
-#define WASM_UNOP(opcode, x) static_cast<byte>(opcode), x
-#define WASM_BINOP(opcode, x, y) static_cast<byte>(opcode), x, y
+#define WASM_WHILE(x, y) \
+  kExprLoop, x, kExprIf, y, kExprBr, ARITY_1, DEPTH_1, kExprEnd, kExprEnd
+#define WASM_INC_LOCAL(index)                                            \
+  kExprGetLocal, static_cast<byte>(index), kExprI8Const, 1, kExprI32Add, \
+      kExprSetLocal, static_cast<byte>(index)
+#define WASM_INC_LOCAL_BY(index, count)                     \
+  kExprGetLocal, static_cast<byte>(index), kExprI8Const,    \
+      static_cast<byte>(count), kExprI32Add, kExprSetLocal, \
+      static_cast<byte>(index)
+#define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
+#define WASM_BINOP(opcode, x, y) x, y, static_cast<byte>(opcode)
 
 //------------------------------------------------------------------------------
 // Int32 operations
 //------------------------------------------------------------------------------
-#define WASM_I32_ADD(x, y) kExprI32Add, x, y
-#define WASM_I32_SUB(x, y) kExprI32Sub, x, y
-#define WASM_I32_MUL(x, y) kExprI32Mul, x, y
-#define WASM_I32_DIVS(x, y) kExprI32DivS, x, y
-#define WASM_I32_DIVU(x, y) kExprI32DivU, x, y
-#define WASM_I32_REMS(x, y) kExprI32RemS, x, y
-#define WASM_I32_REMU(x, y) kExprI32RemU, x, y
-#define WASM_I32_AND(x, y) kExprI32And, x, y
-#define WASM_I32_IOR(x, y) kExprI32Ior, x, y
-#define WASM_I32_XOR(x, y) kExprI32Xor, x, y
-#define WASM_I32_SHL(x, y) kExprI32Shl, x, y
-#define WASM_I32_SHR(x, y) kExprI32ShrU, x, y
-#define WASM_I32_SAR(x, y) kExprI32ShrS, x, y
-#define WASM_I32_ROR(x, y) kExprI32Ror, x, y
-#define WASM_I32_ROL(x, y) kExprI32Rol, x, y
-#define WASM_I32_EQ(x, y) kExprI32Eq, x, y
-#define WASM_I32_NE(x, y) kExprI32Ne, x, y
-#define WASM_I32_LTS(x, y) kExprI32LtS, x, y
-#define WASM_I32_LES(x, y) kExprI32LeS, x, y
-#define WASM_I32_LTU(x, y) kExprI32LtU, x, y
-#define WASM_I32_LEU(x, y) kExprI32LeU, x, y
-#define WASM_I32_GTS(x, y) kExprI32GtS, x, y
-#define WASM_I32_GES(x, y) kExprI32GeS, x, y
-#define WASM_I32_GTU(x, y) kExprI32GtU, x, y
-#define WASM_I32_GEU(x, y) kExprI32GeU, x, y
-#define WASM_I32_CLZ(x) kExprI32Clz, x
-#define WASM_I32_CTZ(x) kExprI32Ctz, x
-#define WASM_I32_POPCNT(x) kExprI32Popcnt, x
-#define WASM_I32_EQZ(x) kExprI32Eqz, x
+#define WASM_I32_ADD(x, y) x, y, kExprI32Add
+#define WASM_I32_SUB(x, y) x, y, kExprI32Sub
+#define WASM_I32_MUL(x, y) x, y, kExprI32Mul
+#define WASM_I32_DIVS(x, y) x, y, kExprI32DivS
+#define WASM_I32_DIVU(x, y) x, y, kExprI32DivU
+#define WASM_I32_REMS(x, y) x, y, kExprI32RemS
+#define WASM_I32_REMU(x, y) x, y, kExprI32RemU
+#define WASM_I32_AND(x, y) x, y, kExprI32And
+#define WASM_I32_IOR(x, y) x, y, kExprI32Ior
+#define WASM_I32_XOR(x, y) x, y, kExprI32Xor
+#define WASM_I32_SHL(x, y) x, y, kExprI32Shl
+#define WASM_I32_SHR(x, y) x, y, kExprI32ShrU
+#define WASM_I32_SAR(x, y) x, y, kExprI32ShrS
+#define WASM_I32_ROR(x, y) x, y, kExprI32Ror
+#define WASM_I32_ROL(x, y) x, y, kExprI32Rol
+#define WASM_I32_EQ(x, y) x, y, kExprI32Eq
+#define WASM_I32_NE(x, y) x, y, kExprI32Ne
+#define WASM_I32_LTS(x, y) x, y, kExprI32LtS
+#define WASM_I32_LES(x, y) x, y, kExprI32LeS
+#define WASM_I32_LTU(x, y) x, y, kExprI32LtU
+#define WASM_I32_LEU(x, y) x, y, kExprI32LeU
+#define WASM_I32_GTS(x, y) x, y, kExprI32GtS
+#define WASM_I32_GES(x, y) x, y, kExprI32GeS
+#define WASM_I32_GTU(x, y) x, y, kExprI32GtU
+#define WASM_I32_GEU(x, y) x, y, kExprI32GeU
+#define WASM_I32_CLZ(x) x, kExprI32Clz
+#define WASM_I32_CTZ(x) x, kExprI32Ctz
+#define WASM_I32_POPCNT(x) x, kExprI32Popcnt
+#define WASM_I32_EQZ(x) x, kExprI32Eqz
 
 //------------------------------------------------------------------------------
 // Int64 operations
 //------------------------------------------------------------------------------
-#define WASM_I64_ADD(x, y) kExprI64Add, x, y
-#define WASM_I64_SUB(x, y) kExprI64Sub, x, y
-#define WASM_I64_MUL(x, y) kExprI64Mul, x, y
-#define WASM_I64_DIVS(x, y) kExprI64DivS, x, y
-#define WASM_I64_DIVU(x, y) kExprI64DivU, x, y
-#define WASM_I64_REMS(x, y) kExprI64RemS, x, y
-#define WASM_I64_REMU(x, y) kExprI64RemU, x, y
-#define WASM_I64_AND(x, y) kExprI64And, x, y
-#define WASM_I64_IOR(x, y) kExprI64Ior, x, y
-#define WASM_I64_XOR(x, y) kExprI64Xor, x, y
-#define WASM_I64_SHL(x, y) kExprI64Shl, x, y
-#define WASM_I64_SHR(x, y) kExprI64ShrU, x, y
-#define WASM_I64_SAR(x, y) kExprI64ShrS, x, y
-#define WASM_I64_ROR(x, y) kExprI64Ror, x, y
-#define WASM_I64_ROL(x, y) kExprI64Rol, x, y
-#define WASM_I64_EQ(x, y) kExprI64Eq, x, y
-#define WASM_I64_NE(x, y) kExprI64Ne, x, y
-#define WASM_I64_LTS(x, y) kExprI64LtS, x, y
-#define WASM_I64_LES(x, y) kExprI64LeS, x, y
-#define WASM_I64_LTU(x, y) kExprI64LtU, x, y
-#define WASM_I64_LEU(x, y) kExprI64LeU, x, y
-#define WASM_I64_GTS(x, y) kExprI64GtS, x, y
-#define WASM_I64_GES(x, y) kExprI64GeS, x, y
-#define WASM_I64_GTU(x, y) kExprI64GtU, x, y
-#define WASM_I64_GEU(x, y) kExprI64GeU, x, y
-#define WASM_I64_CLZ(x) kExprI64Clz, x
-#define WASM_I64_CTZ(x) kExprI64Ctz, x
-#define WASM_I64_POPCNT(x) kExprI64Popcnt, x
-#define WASM_I64_EQZ(x) kExprI64Eqz, x
+#define WASM_I64_ADD(x, y) x, y, kExprI64Add
+#define WASM_I64_SUB(x, y) x, y, kExprI64Sub
+#define WASM_I64_MUL(x, y) x, y, kExprI64Mul
+#define WASM_I64_DIVS(x, y) x, y, kExprI64DivS
+#define WASM_I64_DIVU(x, y) x, y, kExprI64DivU
+#define WASM_I64_REMS(x, y) x, y, kExprI64RemS
+#define WASM_I64_REMU(x, y) x, y, kExprI64RemU
+#define WASM_I64_AND(x, y) x, y, kExprI64And
+#define WASM_I64_IOR(x, y) x, y, kExprI64Ior
+#define WASM_I64_XOR(x, y) x, y, kExprI64Xor
+#define WASM_I64_SHL(x, y) x, y, kExprI64Shl
+#define WASM_I64_SHR(x, y) x, y, kExprI64ShrU
+#define WASM_I64_SAR(x, y) x, y, kExprI64ShrS
+#define WASM_I64_ROR(x, y) x, y, kExprI64Ror
+#define WASM_I64_ROL(x, y) x, y, kExprI64Rol
+#define WASM_I64_EQ(x, y) x, y, kExprI64Eq
+#define WASM_I64_NE(x, y) x, y, kExprI64Ne
+#define WASM_I64_LTS(x, y) x, y, kExprI64LtS
+#define WASM_I64_LES(x, y) x, y, kExprI64LeS
+#define WASM_I64_LTU(x, y) x, y, kExprI64LtU
+#define WASM_I64_LEU(x, y) x, y, kExprI64LeU
+#define WASM_I64_GTS(x, y) x, y, kExprI64GtS
+#define WASM_I64_GES(x, y) x, y, kExprI64GeS
+#define WASM_I64_GTU(x, y) x, y, kExprI64GtU
+#define WASM_I64_GEU(x, y) x, y, kExprI64GeU
+#define WASM_I64_CLZ(x) x, kExprI64Clz
+#define WASM_I64_CTZ(x) x, kExprI64Ctz
+#define WASM_I64_POPCNT(x) x, kExprI64Popcnt
+#define WASM_I64_EQZ(x) x, kExprI64Eqz
 
 //------------------------------------------------------------------------------
 // Float32 operations
 //------------------------------------------------------------------------------
-#define WASM_F32_ADD(x, y) kExprF32Add, x, y
-#define WASM_F32_SUB(x, y) kExprF32Sub, x, y
-#define WASM_F32_MUL(x, y) kExprF32Mul, x, y
-#define WASM_F32_DIV(x, y) kExprF32Div, x, y
-#define WASM_F32_MIN(x, y) kExprF32Min, x, y
-#define WASM_F32_MAX(x, y) kExprF32Max, x, y
-#define WASM_F32_ABS(x) kExprF32Abs, x
-#define WASM_F32_NEG(x) kExprF32Neg, x
-#define WASM_F32_COPYSIGN(x, y) kExprF32CopySign, x, y
-#define WASM_F32_CEIL(x) kExprF32Ceil, x
-#define WASM_F32_FLOOR(x) kExprF32Floor, x
-#define WASM_F32_TRUNC(x) kExprF32Trunc, x
-#define WASM_F32_NEARESTINT(x) kExprF32NearestInt, x
-#define WASM_F32_SQRT(x) kExprF32Sqrt, x
-#define WASM_F32_EQ(x, y) kExprF32Eq, x, y
-#define WASM_F32_NE(x, y) kExprF32Ne, x, y
-#define WASM_F32_LT(x, y) kExprF32Lt, x, y
-#define WASM_F32_LE(x, y) kExprF32Le, x, y
-#define WASM_F32_GT(x, y) kExprF32Gt, x, y
-#define WASM_F32_GE(x, y) kExprF32Ge, x, y
+#define WASM_F32_ADD(x, y) x, y, kExprF32Add
+#define WASM_F32_SUB(x, y) x, y, kExprF32Sub
+#define WASM_F32_MUL(x, y) x, y, kExprF32Mul
+#define WASM_F32_DIV(x, y) x, y, kExprF32Div
+#define WASM_F32_MIN(x, y) x, y, kExprF32Min
+#define WASM_F32_MAX(x, y) x, y, kExprF32Max
+#define WASM_F32_ABS(x) x, kExprF32Abs
+#define WASM_F32_NEG(x) x, kExprF32Neg
+#define WASM_F32_COPYSIGN(x, y) x, y, kExprF32CopySign
+#define WASM_F32_CEIL(x) x, kExprF32Ceil
+#define WASM_F32_FLOOR(x) x, kExprF32Floor
+#define WASM_F32_TRUNC(x) x, kExprF32Trunc
+#define WASM_F32_NEARESTINT(x) x, kExprF32NearestInt
+#define WASM_F32_SQRT(x) x, kExprF32Sqrt
+#define WASM_F32_EQ(x, y) x, y, kExprF32Eq
+#define WASM_F32_NE(x, y) x, y, kExprF32Ne
+#define WASM_F32_LT(x, y) x, y, kExprF32Lt
+#define WASM_F32_LE(x, y) x, y, kExprF32Le
+#define WASM_F32_GT(x, y) x, y, kExprF32Gt
+#define WASM_F32_GE(x, y) x, y, kExprF32Ge
 
 //------------------------------------------------------------------------------
 // Float64 operations
 //------------------------------------------------------------------------------
-#define WASM_F64_ADD(x, y) kExprF64Add, x, y
-#define WASM_F64_SUB(x, y) kExprF64Sub, x, y
-#define WASM_F64_MUL(x, y) kExprF64Mul, x, y
-#define WASM_F64_DIV(x, y) kExprF64Div, x, y
-#define WASM_F64_MIN(x, y) kExprF64Min, x, y
-#define WASM_F64_MAX(x, y) kExprF64Max, x, y
-#define WASM_F64_ABS(x) kExprF64Abs, x
-#define WASM_F64_NEG(x) kExprF64Neg, x
-#define WASM_F64_COPYSIGN(x, y) kExprF64CopySign, x, y
-#define WASM_F64_CEIL(x) kExprF64Ceil, x
-#define WASM_F64_FLOOR(x) kExprF64Floor, x
-#define WASM_F64_TRUNC(x) kExprF64Trunc, x
-#define WASM_F64_NEARESTINT(x) kExprF64NearestInt, x
-#define WASM_F64_SQRT(x) kExprF64Sqrt, x
-#define WASM_F64_EQ(x, y) kExprF64Eq, x, y
-#define WASM_F64_NE(x, y) kExprF64Ne, x, y
-#define WASM_F64_LT(x, y) kExprF64Lt, x, y
-#define WASM_F64_LE(x, y) kExprF64Le, x, y
-#define WASM_F64_GT(x, y) kExprF64Gt, x, y
-#define WASM_F64_GE(x, y) kExprF64Ge, x, y
+#define WASM_F64_ADD(x, y) x, y, kExprF64Add
+#define WASM_F64_SUB(x, y) x, y, kExprF64Sub
+#define WASM_F64_MUL(x, y) x, y, kExprF64Mul
+#define WASM_F64_DIV(x, y) x, y, kExprF64Div
+#define WASM_F64_MIN(x, y) x, y, kExprF64Min
+#define WASM_F64_MAX(x, y) x, y, kExprF64Max
+#define WASM_F64_ABS(x) x, kExprF64Abs
+#define WASM_F64_NEG(x) x, kExprF64Neg
+#define WASM_F64_COPYSIGN(x, y) x, y, kExprF64CopySign
+#define WASM_F64_CEIL(x) x, kExprF64Ceil
+#define WASM_F64_FLOOR(x) x, kExprF64Floor
+#define WASM_F64_TRUNC(x) x, kExprF64Trunc
+#define WASM_F64_NEARESTINT(x) x, kExprF64NearestInt
+#define WASM_F64_SQRT(x) x, kExprF64Sqrt
+#define WASM_F64_EQ(x, y) x, y, kExprF64Eq
+#define WASM_F64_NE(x, y) x, y, kExprF64Ne
+#define WASM_F64_LT(x, y) x, y, kExprF64Lt
+#define WASM_F64_LE(x, y) x, y, kExprF64Le
+#define WASM_F64_GT(x, y) x, y, kExprF64Gt
+#define WASM_F64_GE(x, y) x, y, kExprF64Ge
 
 //------------------------------------------------------------------------------
 // Type conversions.
 //------------------------------------------------------------------------------
-#define WASM_I32_SCONVERT_F32(x) kExprI32SConvertF32, x
-#define WASM_I32_SCONVERT_F64(x) kExprI32SConvertF64, x
-#define WASM_I32_UCONVERT_F32(x) kExprI32UConvertF32, x
-#define WASM_I32_UCONVERT_F64(x) kExprI32UConvertF64, x
-#define WASM_I32_CONVERT_I64(x) kExprI32ConvertI64, x
-#define WASM_I64_SCONVERT_F32(x) kExprI64SConvertF32, x
-#define WASM_I64_SCONVERT_F64(x) kExprI64SConvertF64, x
-#define WASM_I64_UCONVERT_F32(x) kExprI64UConvertF32, x
-#define WASM_I64_UCONVERT_F64(x) kExprI64UConvertF64, x
-#define WASM_I64_SCONVERT_I32(x) kExprI64SConvertI32, x
-#define WASM_I64_UCONVERT_I32(x) kExprI64UConvertI32, x
-#define WASM_F32_SCONVERT_I32(x) kExprF32SConvertI32, x
-#define WASM_F32_UCONVERT_I32(x) kExprF32UConvertI32, x
-#define WASM_F32_SCONVERT_I64(x) kExprF32SConvertI64, x
-#define WASM_F32_UCONVERT_I64(x) kExprF32UConvertI64, x
-#define WASM_F32_CONVERT_F64(x) kExprF32ConvertF64, x
-#define WASM_F32_REINTERPRET_I32(x) kExprF32ReinterpretI32, x
-#define WASM_F64_SCONVERT_I32(x) kExprF64SConvertI32, x
-#define WASM_F64_UCONVERT_I32(x) kExprF64UConvertI32, x
-#define WASM_F64_SCONVERT_I64(x) kExprF64SConvertI64, x
-#define WASM_F64_UCONVERT_I64(x) kExprF64UConvertI64, x
-#define WASM_F64_CONVERT_F32(x) kExprF64ConvertF32, x
-#define WASM_F64_REINTERPRET_I64(x) kExprF64ReinterpretI64, x
-#define WASM_I32_REINTERPRET_F32(x) kExprI32ReinterpretF32, x
-#define WASM_I64_REINTERPRET_F64(x) kExprI64ReinterpretF64, x
+#define WASM_I32_SCONVERT_F32(x) x, kExprI32SConvertF32
+#define WASM_I32_SCONVERT_F64(x) x, kExprI32SConvertF64
+#define WASM_I32_UCONVERT_F32(x) x, kExprI32UConvertF32
+#define WASM_I32_UCONVERT_F64(x) x, kExprI32UConvertF64
+#define WASM_I32_CONVERT_I64(x) x, kExprI32ConvertI64
+#define WASM_I64_SCONVERT_F32(x) x, kExprI64SConvertF32
+#define WASM_I64_SCONVERT_F64(x) x, kExprI64SConvertF64
+#define WASM_I64_UCONVERT_F32(x) x, kExprI64UConvertF32
+#define WASM_I64_UCONVERT_F64(x) x, kExprI64UConvertF64
+#define WASM_I64_SCONVERT_I32(x) x, kExprI64SConvertI32
+#define WASM_I64_UCONVERT_I32(x) x, kExprI64UConvertI32
+#define WASM_F32_SCONVERT_I32(x) x, kExprF32SConvertI32
+#define WASM_F32_UCONVERT_I32(x) x, kExprF32UConvertI32
+#define WASM_F32_SCONVERT_I64(x) x, kExprF32SConvertI64
+#define WASM_F32_UCONVERT_I64(x) x, kExprF32UConvertI64
+#define WASM_F32_CONVERT_F64(x) x, kExprF32ConvertF64
+#define WASM_F32_REINTERPRET_I32(x) x, kExprF32ReinterpretI32
+#define WASM_F64_SCONVERT_I32(x) x, kExprF64SConvertI32
+#define WASM_F64_UCONVERT_I32(x) x, kExprF64UConvertI32
+#define WASM_F64_SCONVERT_I64(x) x, kExprF64SConvertI64
+#define WASM_F64_UCONVERT_I64(x) x, kExprF64UConvertI64
+#define WASM_F64_CONVERT_F32(x) x, kExprF64ConvertF32
+#define WASM_F64_REINTERPRET_I64(x) x, kExprF64ReinterpretI64
+#define WASM_I32_REINTERPRET_F32(x) x, kExprI32ReinterpretF32
+#define WASM_I64_REINTERPRET_F64(x) x, kExprI64ReinterpretF64
+
+#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
+#define SIZEOF_SIG_ENTRY_v_v 3
+
+#define SIG_ENTRY_v_x(a) kWasmFunctionTypeForm, 1, a, 0
+#define SIG_ENTRY_v_xx(a, b) kWasmFunctionTypeForm, 2, a, b, 0
+#define SIG_ENTRY_v_xxx(a, b, c) kWasmFunctionTypeForm, 3, a, b, c, 0
+#define SIZEOF_SIG_ENTRY_v_x 4
+#define SIZEOF_SIG_ENTRY_v_xx 5
+#define SIZEOF_SIG_ENTRY_v_xxx 6
+
+#define SIG_ENTRY_x(r) kWasmFunctionTypeForm, 0, 1, r
+#define SIG_ENTRY_x_x(r, a) kWasmFunctionTypeForm, 1, a, 1, r
+#define SIG_ENTRY_x_xx(r, a, b) kWasmFunctionTypeForm, 2, a, b, 1, r
+#define SIG_ENTRY_x_xxx(r, a, b, c) kWasmFunctionTypeForm, 3, a, b, c, 1, r
+#define SIZEOF_SIG_ENTRY_x 4
+#define SIZEOF_SIG_ENTRY_x_x 5
+#define SIZEOF_SIG_ENTRY_x_xx 6
+#define SIZEOF_SIG_ENTRY_x_xxx 7
 
 #endif  // V8_WASM_MACRO_GEN_H_
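The SIG_ENTRY_* additions above encode a signature as: form byte, parameter count, parameter types, return count, return type(s). For example, assuming kLocalI32 is the i32 value-type code from wasm-opcodes.h, an i32(i32, i32) signature occupies SIZEOF_SIG_ENTRY_x_xx == 6 bytes:

// Illustrative expansion only.
static const byte kSig_i_ii[] = {
    SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32),
    // == kWasmFunctionTypeForm, 2, kLocalI32, kLocalI32, 1, kLocalI32
};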
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index a1c2a7a..c9a4279 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -2,14 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/base/atomic-utils.h"
 #include "src/macro-assembler.h"
 #include "src/objects.h"
+#include "src/property-descriptor.h"
 #include "src/v8.h"
 
 #include "src/simulator.h"
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-function-name-table.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-result.h"
 
@@ -20,19 +23,28 @@
 namespace wasm {
 
 static const char* wasmSections[] = {
-#define F(enumerator, string) string,
+#define F(enumerator, order, string) string,
     FOR_EACH_WASM_SECTION_TYPE(F)
 #undef F
+        "<unknown>"  // entry for "Max"
 };
 
 static uint8_t wasmSectionsLengths[]{
-#define F(enumerator, string) sizeof(string) - 1,
+#define F(enumerator, order, string) sizeof(string) - 1,
     FOR_EACH_WASM_SECTION_TYPE(F)
 #undef F
+        9  // entry for "Max"
+};
+
+static uint8_t wasmSectionsOrders[]{
+#define F(enumerator, order, string) order,
+    FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+        0  // entry for "Max"
 };
 
 static_assert(sizeof(wasmSections) / sizeof(wasmSections[0]) ==
-                  (size_t)WasmSection::Code::Max,
+                  (size_t)WasmSection::Code::Max + 1,
               "expected enum WasmSection::Code to be monotonic from 0");
 
 WasmSection::Code WasmSection::begin() { return (WasmSection::Code)0; }
@@ -49,6 +61,20 @@
   return wasmSectionsLengths[(size_t)code];
 }
 
+int WasmSection::getOrder(WasmSection::Code code) {
+  return wasmSectionsOrders[(size_t)code];
+}
+
+WasmSection::Code WasmSection::lookup(const byte* string, uint32_t length) {
+  // TODO(jfb) Linear search, it may be better to do a common-prefix search.
+  for (Code i = begin(); i != end(); i = next(i)) {
+    if (getNameLength(i) == length && 0 == memcmp(getName(i), string, length)) {
+      return i;
+    }
+  }
+  return Code::Max;
+}
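// The Code::Max sentinel returned above lets callers treat unmatched names
// as unknown sections. Illustrative call site only (not from this patch;
// section_name and name_length are hypothetical):
//   WasmSection::Code code = WasmSection::lookup(section_name, name_length);
//   if (code == WasmSection::Code::Max) { /* skip the section's payload */ }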
+
 std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
   os << "WASM module with ";
   os << (module.min_mem_pages * module.kPageSize) << " min mem";
@@ -59,16 +85,9 @@
   return os;
 }
 
-
 std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
   os << "WASM function with signature " << *function.sig;
 
-  os << " locals: ";
-  if (function.local_i32_count) os << function.local_i32_count << " i32s ";
-  if (function.local_i64_count) os << function.local_i64_count << " i64s ";
-  if (function.local_f32_count) os << function.local_f32_count << " f32s ";
-  if (function.local_f64_count) os << function.local_f64_count << " f64s ";
-
   os << " code bytes: "
      << (function.code_end_offset - function.code_start_offset);
   return os;
@@ -80,7 +99,7 @@
     if (pair.module_) {
       WasmName name = pair.module_->GetName(pair.function_->name_offset,
                                             pair.function_->name_length);
-      os.write(name.name, name.length);
+      os.write(name.start(), name.length());
     } else {
       os << "+" << pair.function_->func_index;
     }
@@ -105,11 +124,11 @@
       // Create a placeholder code object and encode the corresponding index in
       // the {constant_pool_offset} field of the code object.
       // TODO(titzer): placeholder code objects are somewhat dangerous.
-      Handle<Code> self(nullptr, isolate_);
       byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0};  // fake instructions.
       CodeDesc desc = {buffer, 8, 8, 0, 0, nullptr};
       Handle<Code> code = isolate_->factory()->NewCode(
-          desc, Code::KindField::encode(Code::WASM_FUNCTION), self);
+          desc, Code::KindField::encode(Code::WASM_FUNCTION),
+          Handle<Object>::null());
       code->set_constant_pool_offset(index + kPlaceholderMarker);
       placeholder_code_[index] = code;
       function_code_[index] = code;
@@ -177,11 +196,12 @@
 
 namespace {
 // Internal constants for the layout of the module object.
-const int kWasmModuleInternalFieldCount = 4;
+const int kWasmModuleInternalFieldCount = 5;
 const int kWasmModuleFunctionTable = 0;
 const int kWasmModuleCodeTable = 1;
 const int kWasmMemArrayBuffer = 2;
 const int kWasmGlobalsArrayBuffer = 3;
+const int kWasmFunctionNamesArray = 4;
 
 size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>& globals) {
   uint32_t offset = 0;
@@ -195,7 +215,6 @@
   return offset;
 }
 
-
 void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
   for (const WasmDataSegment& segment : module->data_segments) {
     if (!segment.init) continue;
@@ -209,7 +228,6 @@
   }
 }
 
-
 Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
   if (module->function_table.size() == 0) {
     return Handle<FixedArray>::null();
@@ -314,20 +332,20 @@
                                               const char* error, uint32_t index,
                                               wasm::WasmName module_name,
                                               wasm::WasmName function_name) {
-  if (function_name.name) {
+  if (function_name.start()) {
     thrower.Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
-                  index, module_name.length, module_name.name,
-                  function_name.length, function_name.name, error);
+                  index, module_name.length(), module_name.start(),
+                  function_name.length(), function_name.start(), error);
   } else {
     thrower.Error("Import #%d module=\"%.*s\" error: %s", index,
-                  module_name.length, module_name.name, error);
+                  module_name.length(), module_name.start(), error);
   }
   thrower.Error("Import ");
   return MaybeHandle<JSFunction>();
 }
 
 static MaybeHandle<JSFunction> LookupFunction(
-    ErrorThrower& thrower, Factory* factory, Handle<JSObject> ffi,
+    ErrorThrower& thrower, Factory* factory, Handle<JSReceiver> ffi,
     uint32_t index, wasm::WasmName module_name, wasm::WasmName function_name) {
   if (ffi.is_null()) {
     return ReportFFIError(thrower, "FFI is not an object", index, module_name,
@@ -335,8 +353,7 @@
   }
 
   // Look up the module first.
-  Handle<String> name = factory->InternalizeUtf8String(
-      Vector<const char>(module_name.name, module_name.length));
+  Handle<String> name = factory->InternalizeUtf8String(module_name);
   MaybeHandle<Object> result = Object::GetProperty(ffi, name);
   if (result.is_null()) {
     return ReportFFIError(thrower, "module not found", index, module_name,
@@ -351,10 +368,9 @@
   }
 
   Handle<Object> function;
-  if (function_name.name) {
+  if (function_name.start()) {
     // Look up the function in the module.
-    Handle<String> name = factory->InternalizeUtf8String(
-        Vector<const char>(function_name.name, function_name.length));
+    Handle<String> name = factory->InternalizeUtf8String(function_name);
     MaybeHandle<Object> result = Object::GetProperty(module, name);
     if (result.is_null()) {
       return ReportFFIError(thrower, "function not found", index, module_name,
@@ -374,18 +390,253 @@
   return Handle<JSFunction>::cast(function);
 }
 
+namespace {
+// Fetches the compilation unit of a wasm function and executes its parallel
+// phase.
+bool FetchAndExecuteCompilationUnit(
+    Isolate* isolate,
+    std::vector<compiler::WasmCompilationUnit*>* compilation_units,
+    std::queue<compiler::WasmCompilationUnit*>* executed_units,
+    base::Mutex* result_mutex, base::AtomicNumber<size_t>* next_unit) {
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+  DisallowCodeDependencyChange no_dependency_change;
+
+  // - 1 because AtomicIncrement returns the value after the atomic increment.
+  size_t index = next_unit->Increment(1) - 1;
+  if (index >= compilation_units->size()) {
+    return false;
+  }
+
+  compiler::WasmCompilationUnit* unit = compilation_units->at(index);
+  if (unit != nullptr) {
+    compiler::ExecuteCompilation(unit);
+    {
+      base::LockGuard<base::Mutex> guard(result_mutex);
+      executed_units->push(unit);
+    }
+  }
+  return true;
+}
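// Because AtomicNumber::Increment returns the post-increment value, the
// "Increment(1) - 1" idiom above hands each claimant a unique, contiguous
// index. Tiny illustration (not part of the patch):
//   base::AtomicNumber<size_t> next(0);
//   size_t first = next.Increment(1) - 1;   // 0
//   size_t second = next.Increment(1) - 1;  // 1, even with concurrent callers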
+
+class WasmCompilationTask : public CancelableTask {
+ public:
+  WasmCompilationTask(
+      Isolate* isolate,
+      std::vector<compiler::WasmCompilationUnit*>* compilation_units,
+      std::queue<compiler::WasmCompilationUnit*>* executed_units,
+      base::Semaphore* on_finished, base::Mutex* result_mutex,
+      base::AtomicNumber<size_t>* next_unit)
+      : CancelableTask(isolate),
+        isolate_(isolate),
+        compilation_units_(compilation_units),
+        executed_units_(executed_units),
+        on_finished_(on_finished),
+        result_mutex_(result_mutex),
+        next_unit_(next_unit) {}
+
+  void RunInternal() override {
+    while (FetchAndExecuteCompilationUnit(isolate_, compilation_units_,
+                                          executed_units_, result_mutex_,
+                                          next_unit_)) {
+    }
+    on_finished_->Signal();
+  }
+
+  Isolate* isolate_;
+  std::vector<compiler::WasmCompilationUnit*>* compilation_units_;
+  std::queue<compiler::WasmCompilationUnit*>* executed_units_;
+  base::Semaphore* on_finished_;
+  base::Mutex* result_mutex_;
+  base::AtomicNumber<size_t>* next_unit_;
+};
+
+void record_code_size(uint32_t& total_code_size, Code* code) {
+  if (FLAG_print_wasm_code_size) {
+    total_code_size += code->body_size() + code->relocation_info()->length();
+  }
+}
+
+bool CompileWrappersToImportedFunctions(Isolate* isolate, WasmModule* module,
+                                        const Handle<JSReceiver> ffi,
+                                        WasmModuleInstance* instance,
+                                        ErrorThrower* thrower, Factory* factory,
+                                        ModuleEnv* module_env,
+                                        uint32_t& total_code_size) {
+  uint32_t index = 0;
+  if (module->import_table.size() > 0) {
+    instance->import_code.reserve(module->import_table.size());
+    for (const WasmImport& import : module->import_table) {
+      WasmName module_name = module->GetNameOrNull(import.module_name_offset,
+                                                   import.module_name_length);
+      WasmName function_name = module->GetNameOrNull(
+          import.function_name_offset, import.function_name_length);
+      MaybeHandle<JSFunction> function = LookupFunction(
+          *thrower, factory, ffi, index, module_name, function_name);
+      if (function.is_null()) return false;
+
+      Handle<Code> code = compiler::CompileWasmToJSWrapper(
+          isolate, module_env, function.ToHandleChecked(), import.sig,
+          module_name, function_name);
+      instance->import_code.push_back(code);
+      record_code_size(total_code_size, *code);
+      index++;
+    }
+  }
+  return true;
+}
+
+void InitializeParallelCompilation(
+    Isolate* isolate, std::vector<WasmFunction>& functions,
+    std::vector<compiler::WasmCompilationUnit*>& compilation_units,
+    ModuleEnv& module_env, ErrorThrower& thrower) {
+  // Create a placeholder code object for all functions.
+  // TODO(ahaas): Maybe we could skip this for external functions.
+  for (uint32_t i = 0; i < functions.size(); i++) {
+    module_env.linker->GetFunctionCode(i);
+  }
+
+  for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); i++) {
+    compilation_units[i] = compiler::CreateWasmCompilationUnit(
+        &thrower, isolate, &module_env, &functions[i], i);
+  }
+}
+
+uint32_t* StartCompilationTasks(
+    Isolate* isolate,
+    std::vector<compiler::WasmCompilationUnit*>& compilation_units,
+    std::queue<compiler::WasmCompilationUnit*>& executed_units,
+    const base::SmartPointer<base::Semaphore>& pending_tasks,
+    base::Mutex& result_mutex, base::AtomicNumber<size_t>& next_unit) {
+  const size_t num_tasks =
+      Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+          V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+  uint32_t* task_ids = new uint32_t[num_tasks];
+  for (size_t i = 0; i < num_tasks; i++) {
+    WasmCompilationTask* task =
+        new WasmCompilationTask(isolate, &compilation_units, &executed_units,
+                                pending_tasks.get(), &result_mutex, &next_unit);
+    task_ids[i] = task->id();
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        task, v8::Platform::kShortRunningTask);
+  }
+  return task_ids;
+}
+
+void WaitForCompilationTasks(
+    Isolate* isolate, uint32_t* task_ids,
+    const base::SmartPointer<base::Semaphore>& pending_tasks) {
+  const size_t num_tasks =
+      Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+          V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+  for (size_t i = 0; i < num_tasks; i++) {
+    // If the task has not started yet, then we abort it. Otherwise we wait for
+    // it to finish.
+    if (!isolate->cancelable_task_manager()->TryAbort(task_ids[i])) {
+      pending_tasks->Wait();
+    }
+  }
+}
+
+void FinishCompilationUnits(
+    WasmModule* module,
+    std::queue<compiler::WasmCompilationUnit*>& executed_units,
+    std::vector<Handle<Code>>& results, base::Mutex& result_mutex) {
+  while (true) {
+    compiler::WasmCompilationUnit* unit = nullptr;
+    {
+      base::LockGuard<base::Mutex> guard(&result_mutex);
+      if (executed_units.empty()) {
+        break;
+      }
+      unit = executed_units.front();
+      executed_units.pop();
+    }
+    int j = compiler::GetIndexOfWasmCompilationUnit(unit);
+    results[j] = compiler::FinishCompilation(unit);
+  }
+}
+
+bool FinishCompilation(Isolate* isolate, WasmModule* module,
+                       const Handle<JSReceiver> ffi,
+                       const std::vector<Handle<Code>>& results,
+                       const WasmModuleInstance& instance,
+                       const Handle<FixedArray>& code_table,
+                       ErrorThrower& thrower, Factory* factory,
+                       ModuleEnv& module_env, uint32_t& total_code_size,
+                       PropertyDescriptor& desc) {
+  for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
+       i < module->functions.size(); i++) {
+    const WasmFunction& func = module->functions[i];
+    if (thrower.error()) break;
+
+    DCHECK_EQ(i, func.func_index);
+    WasmName str = module->GetName(func.name_offset, func.name_length);
+    Handle<Code> code = Handle<Code>::null();
+    Handle<JSFunction> function = Handle<JSFunction>::null();
+    Handle<String> function_name = Handle<String>::null();
+    if (FLAG_wasm_num_compilation_tasks != 0) {
+      code = results[i];
+    } else {
+      // Compile the function.
+      code =
+          compiler::CompileWasmFunction(&thrower, isolate, &module_env, &func);
+    }
+    if (code.is_null()) {
+      thrower.Error("Compilation of #%d:%.*s failed.", i, str.length(),
+                    str.start());
+      return false;
+    }
+    if (func.exported) {
+      function_name = factory->InternalizeUtf8String(str);
+      function = compiler::CompileJSToWasmWrapper(
+          isolate, &module_env, function_name, code, instance.js_object, i);
+      record_code_size(total_code_size, function->code());
+    }
+    if (!code.is_null()) {
+      // Install the code into the linker table.
+      module_env.linker->Finish(i, code);
+      code_table->set(i, *code);
+      record_code_size(total_code_size, *code);
+    }
+    if (func.exported) {
+      // Exported functions are installed as read-only properties on the
+      // module.
+      desc.set_value(function);
+      Maybe<bool> status = JSReceiver::DefineOwnProperty(
+          isolate, instance.js_object, function_name, &desc,
+          Object::THROW_ON_ERROR);
+      if (!status.IsJust())
+        thrower.Error("export of %.*s failed.", str.length(), str.start());
+    }
+  }
+  return true;
+}
+}  // namespace
+
 // Instantiates a wasm module as a JSObject.
 //  * allocates a backing store of {mem_size} bytes.
 //  * installs a named property "memory" for that buffer if exported
 //  * installs named properties on the object for exported functions
 //  * compiles wasm code to machine code
 MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
-                                              Handle<JSObject> ffi,
+                                              Handle<JSReceiver> ffi,
                                               Handle<JSArrayBuffer> memory) {
+  HistogramTimerScope wasm_instantiate_module_time_scope(
+      isolate->counters()->wasm_instantiate_module_time());
   this->shared_isolate = isolate;  // TODO(titzer): have a real shared isolate.
   ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
   Factory* factory = isolate->factory();
 
+  PropertyDescriptor desc;
+  desc.set_writable(false);
+
+  // If FLAG_print_wasm_code_size is set, this aggregates the total size of
+  // all code objects created for this module.
+  // TODO(titzer): switch this to TRACE_EVENT
+  uint32_t total_code_size = 0;
+
   //-------------------------------------------------------------------------
   // Allocate the instance and its JS counterpart.
   //-------------------------------------------------------------------------
@@ -402,6 +653,10 @@
   //-------------------------------------------------------------------------
   // Allocate and initialize the linear memory.
   //-------------------------------------------------------------------------
+  isolate->counters()->wasm_min_mem_pages_count()->AddSample(
+      instance.module->min_mem_pages);
+  isolate->counters()->wasm_max_mem_pages_count()->AddSample(
+      instance.module->max_mem_pages);
   if (memory.is_null()) {
     if (!AllocateMemory(&thrower, isolate, &instance)) {
       return MaybeHandle<JSObject>();
@@ -424,10 +679,9 @@
                                          *instance.globals_buffer);
   }
 
-  //-------------------------------------------------------------------------
-  // Compile wrappers to imported functions.
-  //-------------------------------------------------------------------------
-  uint32_t index = 0;
+  HistogramTimerScope wasm_compile_module_time_scope(
+      isolate->counters()->wasm_compile_module_time());
+
   instance.function_table = BuildFunctionTable(isolate, this);
   WasmLinker linker(isolate, functions.size());
   ModuleEnv module_env;
@@ -436,113 +690,147 @@
   module_env.linker = &linker;
   module_env.origin = origin;
 
-  if (import_table.size() > 0) {
-    instance.import_code.reserve(import_table.size());
-    for (const WasmImport& import : import_table) {
-      WasmName module_name =
-          GetNameOrNull(import.module_name_offset, import.module_name_length);
-      WasmName function_name = GetNameOrNull(import.function_name_offset,
-                                             import.function_name_length);
-      MaybeHandle<JSFunction> function = LookupFunction(
-          thrower, factory, ffi, index, module_name, function_name);
-      if (function.is_null()) return MaybeHandle<JSObject>();
-      Handle<Code> code = compiler::CompileWasmToJSWrapper(
-          isolate, &module_env, function.ToHandleChecked(), import.sig,
-          module_name, function_name);
-      instance.import_code.push_back(code);
-      index++;
-    }
+  //-------------------------------------------------------------------------
+  // Compile wrappers to imported functions.
+  //-------------------------------------------------------------------------
+  if (!CompileWrappersToImportedFunctions(isolate, this, ffi, &instance,
+                                          &thrower, factory, &module_env,
+                                          total_code_size)) {
+    return MaybeHandle<JSObject>();
   }
-
   //-------------------------------------------------------------------------
   // Compile all functions in the module.
   //-------------------------------------------------------------------------
+  {
+    isolate->counters()->wasm_functions_per_module()->AddSample(
+        static_cast<int>(functions.size()));
 
-  // First pass: compile each function and initialize the code table.
-  index = FLAG_skip_compiling_wasm_funcs;
-  while (index < functions.size()) {
-    const WasmFunction& func = functions[index];
-    if (thrower.error()) break;
-    DCHECK_EQ(index, func.func_index);
+    // Data structures for the parallel compilation.
+    std::vector<compiler::WasmCompilationUnit*> compilation_units(
+        functions.size());
+    std::queue<compiler::WasmCompilationUnit*> executed_units;
+    std::vector<Handle<Code>> results(functions.size());
 
-    WasmName str = GetName(func.name_offset, func.name_length);
-    WasmName str_null = {nullptr, 0};
-    Handle<String> name = factory->InternalizeUtf8String(
-        Vector<const char>(str.name, str.length));
-    Handle<Code> code = Handle<Code>::null();
-    Handle<JSFunction> function = Handle<JSFunction>::null();
-    if (func.external) {
-      // Lookup external function in FFI object.
-      MaybeHandle<JSFunction> function =
-          LookupFunction(thrower, factory, ffi, index, str, str_null);
-      if (function.is_null()) return MaybeHandle<JSObject>();
-      code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
-                                              function.ToHandleChecked(),
-                                              func.sig, str, str_null);
-    } else {
-      // Compile the function.
-      code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
-      if (code.is_null()) {
-        thrower.Error("Compilation of #%d:%.*s failed.", index, str.length,
-                      str.name);
-        return MaybeHandle<JSObject>();
+    if (FLAG_wasm_num_compilation_tasks != 0) {
+      //-----------------------------------------------------------------------
+      // For parallel compilation:
+      // 1) The main thread allocates a compilation unit for each wasm function
+      //    and stores them in the vector {compilation_units}.
+      // 2) The main thread spawns {WasmCompilationTask} instances which run on
+      //    the background threads.
+      // 3.a) The background threads and the main thread pick one compilation
+      //      unit at a time and execute the parallel phase of the compilation
+      //      unit. After finishing the execution of the parallel phase, the
+      //      result is enqueued in {executed_units}.
+      // 3.b) If {executed_units} contains a compilation unit, the main thread
+      //      dequeues it and finishes the compilation.
+      // 4) After the parallel phase of all compilation units has started, the
+      //    main thread waits for all {WasmCompilationTask} instances to finish.
+      // 5) The main thread finishes the compilation.
+
+      // Turn on the {CanonicalHandleScope} so that the background threads can
+      // use the node cache.
+      CanonicalHandleScope canonical(isolate);
+
+      // 1) The main thread allocates a compilation unit for each wasm function
+      //    and stores them in the vector {compilation_units}.
+      InitializeParallelCompilation(isolate, functions, compilation_units,
+                                    module_env, thrower);
+
+      // Objects for the synchronization with the background threads.
+      base::SmartPointer<base::Semaphore> pending_tasks(new base::Semaphore(0));
+      base::Mutex result_mutex;
+      base::AtomicNumber<size_t> next_unit(
+          static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
+
+      // 2) The main thread spawns {WasmCompilationTask} instances which run on
+      //    the background threads.
+      base::SmartArrayPointer<uint32_t> task_ids(
+          StartCompilationTasks(isolate, compilation_units, executed_units,
+                                pending_tasks, result_mutex, next_unit));
+
+      // 3.a) The background threads and the main thread pick one compilation
+      //      unit at a time and execute the parallel phase of the compilation
+      //      unit. After finishing the execution of the parallel phase, the
+      //      result is enqueued in {executed_units}.
+      while (FetchAndExecuteCompilationUnit(isolate, &compilation_units,
+                                            &executed_units, &result_mutex,
+                                            &next_unit)) {
+        // 3.b) If {executed_units} contains a compilation unit, the main thread
+        //      dequeues it and finishes the compilation unit. Compilation units
+        //      are finished concurrently with the background threads to save
+        //      memory.
+        FinishCompilationUnits(this, executed_units, results, result_mutex);
       }
-      if (func.exported) {
-        function = compiler::CompileJSToWasmWrapper(
-            isolate, &module_env, name, code, instance.js_object, index);
-      }
+      // 4) After the parallel phase of all compilation units has started, the
+      //    main thread waits for all {WasmCompilationTask} instances to finish.
+      WaitForCompilationTasks(isolate, task_ids.get(), pending_tasks);
+      // Finish the compilation of the remaining compilation units.
+      FinishCompilationUnits(this, executed_units, results, result_mutex);
     }
-    if (!code.is_null()) {
-      // Install the code into the linker table.
-      linker.Finish(index, code);
-      code_table->set(index, *code);
-    }
-    if (func.exported) {
-      // Exported functions are installed as read-only properties on the module.
-      JSObject::AddProperty(instance.js_object, name, function, READ_ONLY);
-    }
-    index++;
-  }
-
-  // Second pass: patch all direct call sites.
-  linker.Link(instance.function_table, this->function_table);
-  instance.js_object->SetInternalField(kWasmModuleFunctionTable,
-                                       Smi::FromInt(0));
-
-  //-------------------------------------------------------------------------
-  // Create and populate the exports object.
-  //-------------------------------------------------------------------------
-  if (export_table.size() > 0 || mem_export) {
-    index = 0;
-    // Create the "exports" object.
-    Handle<JSFunction> object_function = Handle<JSFunction>(
-        isolate->native_context()->object_function(), isolate);
-    Handle<JSObject> exports_object =
-        factory->NewJSObject(object_function, TENURED);
-    Handle<String> exports_name = factory->InternalizeUtf8String("exports");
-    JSObject::AddProperty(instance.js_object, exports_name, exports_object,
-                          READ_ONLY);
-
-    // Compile wrappers and add them to the exports object.
-    for (const WasmExport& exp : export_table) {
-      if (thrower.error()) break;
-      WasmName str = GetName(exp.name_offset, exp.name_length);
-      Handle<String> name = factory->InternalizeUtf8String(
-          Vector<const char>(str.name, str.length));
-      Handle<Code> code = linker.GetFunctionCode(exp.func_index);
-      Handle<JSFunction> function = compiler::CompileJSToWasmWrapper(
-          isolate, &module_env, name, code, instance.js_object, exp.func_index);
-      JSObject::AddProperty(exports_object, name, function, READ_ONLY);
+    // 5) The main thread finishes the compilation.
+    if (!FinishCompilation(isolate, this, ffi, results, instance, code_table,
+                           thrower, factory, module_env, total_code_size,
+                           desc)) {
+      return MaybeHandle<JSObject>();
     }
 
-    if (mem_export) {
-      // Export the memory as a named property.
-      Handle<String> name = factory->InternalizeUtf8String("memory");
-      JSObject::AddProperty(exports_object, name, instance.mem_buffer,
+    // Patch all direct call sites.
+    linker.Link(instance.function_table, this->function_table);
+    instance.js_object->SetInternalField(kWasmModuleFunctionTable,
+                                         Smi::FromInt(0));
+
+    //-------------------------------------------------------------------------
+    // Create and populate the exports object.
+    //-------------------------------------------------------------------------
+    if (export_table.size() > 0 || mem_export) {
+      // Create the "exports" object.
+      Handle<JSFunction> object_function = Handle<JSFunction>(
+          isolate->native_context()->object_function(), isolate);
+      Handle<JSObject> exports_object =
+          factory->NewJSObject(object_function, TENURED);
+      Handle<String> exports_name = factory->InternalizeUtf8String("exports");
+      JSObject::AddProperty(instance.js_object, exports_name, exports_object,
                             READ_ONLY);
+
+      // Compile wrappers and add them to the exports object.
+      for (const WasmExport& exp : export_table) {
+        if (thrower.error()) break;
+        WasmName str = GetName(exp.name_offset, exp.name_length);
+        Handle<String> name = factory->InternalizeUtf8String(str);
+        Handle<Code> code = linker.GetFunctionCode(exp.func_index);
+        Handle<JSFunction> function = compiler::CompileJSToWasmWrapper(
+            isolate, &module_env, name, code, instance.js_object,
+            exp.func_index);
+        record_code_size(total_code_size, function->code());
+        desc.set_value(function);
+        Maybe<bool> status = JSReceiver::DefineOwnProperty(
+            isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
+        if (!status.IsJust())
+          thrower.Error("export of %.*s failed.", str.length(), str.start());
+      }
+
+      if (mem_export) {
+        // Export the memory as a named property.
+        Handle<String> name = factory->InternalizeUtf8String("memory");
+        JSObject::AddProperty(exports_object, name, instance.mem_buffer,
+                              READ_ONLY);
+      }
     }
   }
 
+  //-------------------------------------------------------------------------
+  // Attach an array with function names and an array with offsets into that
+  // first array.
+  //-------------------------------------------------------------------------
+  {
+    Handle<Object> arr = BuildFunctionNamesTable(isolate, module_env.module);
+    instance.js_object->SetInternalField(kWasmFunctionNamesArray, *arr);
+  }
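+  // The {kWasmFunctionNamesArray} field set here is what GetWasmFunctionName
+  // (see below) reads back when a function name is needed later, e.g. for
+  // diagnostics.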
+
+  if (FLAG_print_wasm_code_size)
+    printf("Total generated wasm code: %u bytes\n", total_code_size);
+
   // Run the start function if one was specified.
   if (this->start_function_index >= 0) {
     HandleScope scope(isolate);
@@ -564,7 +852,6 @@
   return instance.js_object;
 }
 
-
 Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
   DCHECK(IsValidFunction(index));
   if (linker) return linker->GetFunctionCode(index);
@@ -585,7 +872,6 @@
   return GetWasmCallDescriptor(zone, function->sig);
 }
 
-
 int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
                                 const byte* module_end, bool asm_js) {
   HandleScope scope(isolate);
@@ -611,7 +897,6 @@
   return retval;
 }
 
-
 int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
   ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
   WasmModuleInstance instance(module);
@@ -644,19 +929,17 @@
   int main_index = 0;
   for (const WasmFunction& func : module->functions) {
     DCHECK_EQ(index, func.func_index);
-    if (!func.external) {
-      // Compile the function and install it in the code table.
-      Handle<Code> code =
-          compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
-      if (!code.is_null()) {
-        if (func.exported) {
-          main_code = code;
-          main_index = index;
-        }
-        linker.Finish(index, code);
+    // Compile the function and install it in the code table.
+    Handle<Code> code =
+        compiler::CompileWasmFunction(&thrower, isolate, &module_env, &func);
+    if (!code.is_null()) {
+      if (func.exported) {
+        main_code = code;
+        main_index = index;
       }
-      if (thrower.error()) return -1;
+      linker.Finish(index, code);
     }
+    if (thrower.error()) return -1;
     index++;
   }
 
@@ -693,6 +976,16 @@
   thrower.Error("WASM.compileRun() failed: Return value should be number");
   return -1;
 }
+
+Handle<Object> GetWasmFunctionName(Handle<JSObject> wasm, uint32_t func_index) {
+  Handle<Object> func_names_arr_obj = handle(
+      wasm->GetInternalField(kWasmFunctionNamesArray), wasm->GetIsolate());
+  if (func_names_arr_obj->IsUndefined())
+    return func_names_arr_obj;  // Return undefined.
+  return GetWasmFunctionNameFromTable(
+      Handle<ByteArray>::cast(func_names_arr_obj), func_index);
+}
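+// Sketch of a caller (assumed, not part of this change):
+//   Handle<Object> name = GetWasmFunctionName(wasm_obj, func_index);
+//   if (name->IsUndefined()) { /* fall back to a synthesized name */ }
+// The undefined result covers both unnamed functions and invalid indices.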
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h
index 4e5aa78..2ac0425 100644
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
@@ -16,6 +16,7 @@
 
 namespace compiler {
 class CallDescriptor;
+class WasmCompilationUnit;
 }
 
 namespace wasm {
@@ -23,69 +24,63 @@
 const size_t kMaxFunctionSize = 128 * 1024;
 const size_t kMaxStringSize = 256;
 const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0a;
+const uint32_t kWasmVersion = 0x0b;
+const uint8_t kWasmFunctionTypeForm = 0x40;
 
 // WebAssembly sections are named as strings in the binary format, but
 // internally V8 uses an enum to handle them.
 //
-// Entries have the form F(enumerator, string).
+// Entries have the form F(enumerator, order, string).
-#define FOR_EACH_WASM_SECTION_TYPE(F)          \
-  F(Memory, "memory")                          \
-  F(Signatures, "signatures")                  \
-  F(Functions, "functions")                    \
-  F(Globals, "globals")                        \
-  F(DataSegments, "data_segments")             \
-  F(FunctionTable, "function_table")           \
-  F(End, "end")                                \
-  F(StartFunction, "start_function")           \
-  F(ImportTable, "import_table")               \
-  F(ExportTable, "export_table")               \
-  F(FunctionSignatures, "function_signatures") \
-  F(FunctionBodies, "function_bodies")         \
-  F(Names, "names")
+#define FOR_EACH_WASM_SECTION_TYPE(F)  \
+  F(Signatures, 1, "type")             \
+  F(ImportTable, 2, "import")          \
+  F(FunctionSignatures, 3, "function") \
+  F(FunctionTable, 4, "table")         \
+  F(Memory, 5, "memory")               \
+  F(ExportTable, 6, "export")          \
+  F(StartFunction, 7, "start")         \
+  F(FunctionBodies, 8, "code")         \
+  F(DataSegments, 9, "data")           \
+  F(Names, 10, "name")                 \
+  F(OldFunctions, 0, "old_function")   \
+  F(Globals, 0, "global")              \
+  F(End, 0, "end")
 
 // Constants for the above section types: {LEB128 length, characters...}.
 #define WASM_SECTION_MEMORY 6, 'm', 'e', 'm', 'o', 'r', 'y'
-#define WASM_SECTION_SIGNATURES \
-  10, 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', 's'
-#define WASM_SECTION_FUNCTIONS 9, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', 's'
-#define WASM_SECTION_GLOBALS 7, 'g', 'l', 'o', 'b', 'a', 'l', 's'
-#define WASM_SECTION_DATA_SEGMENTS \
-  13, 'd', 'a', 't', 'a', '_', 's', 'e', 'g', 'm', 'e', 'n', 't', 's'
-#define WASM_SECTION_FUNCTION_TABLE \
-  14, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_SIGNATURES 4, 't', 'y', 'p', 'e'
+#define WASM_SECTION_OLD_FUNCTIONS \
+  12, 'o', 'l', 'd', '_', 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
+#define WASM_SECTION_GLOBALS 6, 'g', 'l', 'o', 'b', 'a', 'l'
+#define WASM_SECTION_DATA_SEGMENTS 4, 'd', 'a', 't', 'a'
+#define WASM_SECTION_FUNCTION_TABLE 5, 't', 'a', 'b', 'l', 'e'
 #define WASM_SECTION_END 3, 'e', 'n', 'd'
-#define WASM_SECTION_START_FUNCTION \
-  14, 's', 't', 'a', 'r', 't', '_', 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
-#define WASM_SECTION_IMPORT_TABLE \
-  12, 'i', 'm', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
-#define WASM_SECTION_EXPORT_TABLE \
-  12, 'e', 'x', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
-#define WASM_SECTION_FUNCTION_SIGNATURES                                    \
-  19, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 's', 'i', 'g', 'n', 'a', \
-      't', 'u', 'r', 'e', 's'
-#define WASM_SECTION_FUNCTION_BODIES \
-  15, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 'b', 'o', 'd', 'i', 'e', 's'
-#define WASM_SECTION_NAMES 5, 'n', 'a', 'm', 'e', 's'
+#define WASM_SECTION_START_FUNCTION 5, 's', 't', 'a', 'r', 't'
+#define WASM_SECTION_IMPORT_TABLE 6, 'i', 'm', 'p', 'o', 'r', 't'
+#define WASM_SECTION_EXPORT_TABLE 6, 'e', 'x', 'p', 'o', 'r', 't'
+#define WASM_SECTION_FUNCTION_SIGNATURES \
+  8, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
+#define WASM_SECTION_FUNCTION_BODIES 4, 'c', 'o', 'd', 'e'
+#define WASM_SECTION_NAMES 4, 'n', 'a', 'm', 'e'
 
 // Constants for the above section headers' size (LEB128 + characters).
 #define WASM_SECTION_MEMORY_SIZE ((size_t)7)
-#define WASM_SECTION_SIGNATURES_SIZE ((size_t)11)
-#define WASM_SECTION_FUNCTIONS_SIZE ((size_t)10)
-#define WASM_SECTION_GLOBALS_SIZE ((size_t)8)
-#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)14)
-#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)15)
+#define WASM_SECTION_SIGNATURES_SIZE ((size_t)5)
+#define WASM_SECTION_OLD_FUNCTIONS_SIZE ((size_t)13)
+#define WASM_SECTION_GLOBALS_SIZE ((size_t)7)
+#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)5)
+#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)6)
 #define WASM_SECTION_END_SIZE ((size_t)4)
-#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)15)
-#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)13)
-#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)13)
-#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)20)
-#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)16)
-#define WASM_SECTION_NAMES_SIZE ((size_t)6)
+#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)6)
+#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)7)
+#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)7)
+#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)9)
+#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)5)
+#define WASM_SECTION_NAMES_SIZE ((size_t)5)
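+// Example: WASM_SECTION_NAMES is {4, 'n', 'a', 'm', 'e'}, i.e. one LEB128
+// length byte plus four name characters, so WASM_SECTION_NAMES_SIZE is
+// ((size_t)5); each *_SIZE above follows the same "1 + string length" rule.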
 
 struct WasmSection {
   enum class Code : uint32_t {
-#define F(enumerator, string) enumerator,
+#define F(enumerator, order, string) enumerator,
     FOR_EACH_WASM_SECTION_TYPE(F)
 #undef F
         Max
@@ -94,13 +89,13 @@
   static WasmSection::Code end();
   static WasmSection::Code next(WasmSection::Code code);
   static const char* getName(Code code);
+  static int getOrder(Code code);
   static size_t getNameLength(Code code);
+  static WasmSection::Code lookup(const byte* string, uint32_t length);
 };
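+// Assumed decoder usage of the two new helpers (sketch only):
+//   WasmSection::Code sec = WasmSection::lookup(name_bytes, name_length);
+//   if (WasmSection::getOrder(sec) < order_of_previous_section) error(...);
+// i.e. sections must appear in the numeric order given in the table above.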
 
 enum WasmFunctionDeclBit {
   kDeclFunctionName = 0x01,
-  kDeclFunctionImport = 0x02,
-  kDeclFunctionLocals = 0x04,
   kDeclFunctionExport = 0x08
 };
 
@@ -108,6 +103,8 @@
 static const size_t kDeclMemorySize = 3;
 static const size_t kDeclDataSegmentSize = 13;
 
+static const uint32_t kMaxReturnCount = 1;
+
 // Static representation of a WASM function.
 struct WasmFunction {
   FunctionSig* sig;      // signature of the function.
@@ -117,12 +114,7 @@
   uint32_t name_length;  // length in bytes of the name.
   uint32_t code_start_offset;    // offset in the module bytes of code start.
   uint32_t code_end_offset;      // offset in the module bytes of code end.
-  uint16_t local_i32_count;      // number of i32 local variables.
-  uint16_t local_i64_count;      // number of i64 local variables.
-  uint16_t local_f32_count;      // number of f32 local variables.
-  uint16_t local_f64_count;      // number of f64 local variables.
   bool exported;                 // true if this function is exported.
-  bool external;  // true if this function is externally supplied.
 };
 
 // Static representation of an imported WASM function.
@@ -191,24 +183,38 @@
   WasmName GetName(uint32_t offset, uint32_t length) const {
     if (length == 0) return {"<?>", 3};  // no name.
     CHECK(BoundsCheck(offset, offset + length));
-    return {reinterpret_cast<const char*>(module_start + offset), length};
+    DCHECK_GE(static_cast<int>(length), 0);
+    return {reinterpret_cast<const char*>(module_start + offset),
+            static_cast<int>(length)};
+  }
+
+  // Get a string stored in the module bytes representing a function name.
+  WasmName GetName(WasmFunction* function) const {
+    return GetName(function->name_offset, function->name_length);
   }
 
   // Get a string stored in the module bytes representing a name.
   WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
     if (length == 0) return {NULL, 0};  // no name.
     CHECK(BoundsCheck(offset, offset + length));
-    return {reinterpret_cast<const char*>(module_start + offset), length};
+    DCHECK_GE(static_cast<int>(length), 0);
+    return {reinterpret_cast<const char*>(module_start + offset),
+            static_cast<int>(length)};
+  }
+
+  // Get a string stored in the module bytes representing a function name.
+  WasmName GetNameOrNull(WasmFunction* function) const {
+    return GetNameOrNull(function->name_offset, function->name_length);
   }
 
   // Checks the given offset range is contained within the module bytes.
   bool BoundsCheck(uint32_t start, uint32_t end) const {
     size_t size = module_end - module_start;
-    return start < size && end < size;
+    return start <= size && end <= size;
   }
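+  // Example of the off-by-one this fixes: in a 10-byte module, a name at
+  // offset 6 with length 4 has end == 10; "end < size" wrongly rejected a
+  // range ending exactly at the module end, "end <= size" accepts it.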
 
   // Creates a new instantiation of the module in the given isolate.
-  MaybeHandle<JSObject> Instantiate(Isolate* isolate, Handle<JSObject> ffi,
+  MaybeHandle<JSObject> Instantiate(Isolate* isolate, Handle<JSReceiver> ffi,
                                     Handle<JSArrayBuffer> memory);
 };
 
@@ -318,6 +324,11 @@
 // given decoded module.
 int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
 
+// Extract a function name from the given wasm object.
+// Returns undefined if the function is unnamed or the function index is
+// invalid.
+Handle<Object> GetWasmFunctionName(Handle<JSObject> wasm, uint32_t func_index);
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/wasm-opcodes.cc b/src/wasm/wasm-opcodes.cc
index 736c4d9..a08fa8d 100644
--- a/src/wasm/wasm-opcodes.cc
+++ b/src/wasm/wasm-opcodes.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/wasm/wasm-opcodes.h"
+#include "src/messages.h"
 #include "src/signature.h"
 
 namespace v8 {
@@ -24,6 +25,18 @@
   return "Unknown";
 }
 
+const char* WasmOpcodes::ShortOpcodeName(WasmOpcode opcode) {
+  switch (opcode) {
+#define DECLARE_NAME_CASE(name, opcode, sig) \
+  case kExpr##name:                          \
+    return #name;
+    FOREACH_OPCODE(DECLARE_NAME_CASE)
+#undef DECLARE_NAME_CASE
+    default:
+      break;
+  }
+  return "Unknown";
+}
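+// e.g. ShortOpcodeName(kExprI32Add) returns "I32Add"; DECLARE_NAME_CASE
+// stringizes the bare opcode name via #name.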
 
 std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
   if (sig.return_count() == 0) os << "v";
@@ -38,13 +51,10 @@
   return os;
 }
 
-
 #define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
 
-
 enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
 
-
 // TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
 #define DECLARE_SIG(name, ...)                      \
   static LocalType kTypes_##name[] = {__VA_ARGS__}; \
@@ -60,7 +70,6 @@
 
 static byte kSimpleExprSigTable[256];
 
-
 // Initialize the signature table.
 static void InitSigTable() {
 #define SET_SIG_TABLE(name, opcode, sig) \
@@ -70,15 +79,24 @@
 #undef SET_SIG_TABLE
 }
 
+class SigTable {
+ public:
+  SigTable() {
+    // TODO(ahaas): Move {InitSigTable} into the class.
+    InitSigTable();
+  }
+  FunctionSig* Signature(WasmOpcode opcode) const {
+    return const_cast<FunctionSig*>(
+        kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
+  }
+};
+
+static base::LazyInstance<SigTable>::type sig_table = LAZY_INSTANCE_INITIALIZER;
 
 FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
-  // TODO(titzer): use LazyInstance to make this thread safe.
-  if (kSimpleExprSigTable[kExprI32Add] == 0) InitSigTable();
-  return const_cast<FunctionSig*>(
-      kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
+  return sig_table.Get().Signature(opcode);
 }
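+// {base::LazyInstance} constructs the {SigTable} (and thus runs
+// {InitSigTable}) exactly once even under concurrent first calls, fixing the
+// race the old TODO pointed out. Any thread may now safely do, e.g.:
+//   FunctionSig* sig = WasmOpcodes::Signature(kExprI32Add);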
 
-
 // TODO(titzer): pull WASM_64 up to a common header.
 #if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
 #define WASM_64 1
@@ -86,64 +104,20 @@
 #define WASM_64 0
 #endif
 
-
-bool WasmOpcodes::IsSupported(WasmOpcode opcode) {
-#if !WASM_64
-  switch (opcode) {
-    // Opcodes not supported on 32-bit platforms.
-    case kExprI64Add:
-    case kExprI64Sub:
-    case kExprI64Mul:
-    case kExprI64DivS:
-    case kExprI64DivU:
-    case kExprI64RemS:
-    case kExprI64RemU:
-    case kExprI64And:
-    case kExprI64Ior:
-    case kExprI64Xor:
-    case kExprI64Shl:
-    case kExprI64ShrU:
-    case kExprI64ShrS:
-    case kExprI64Ror:
-    case kExprI64Rol:
-    case kExprI64Eq:
-    case kExprI64Ne:
-    case kExprI64LtS:
-    case kExprI64LeS:
-    case kExprI64LtU:
-    case kExprI64LeU:
-    case kExprI64GtS:
-    case kExprI64GeS:
-    case kExprI64GtU:
-    case kExprI64GeU:
-
-    case kExprI32ConvertI64:
-    case kExprI64SConvertI32:
-    case kExprI64UConvertI32:
-
-    case kExprF64ReinterpretI64:
-    case kExprI64ReinterpretF64:
-
-    case kExprI64Clz:
-    case kExprI64Ctz:
-    case kExprI64Popcnt:
-
-    case kExprF32SConvertI64:
-    case kExprF32UConvertI64:
-    case kExprF64SConvertI64:
-    case kExprF64UConvertI64:
-    case kExprI64SConvertF32:
-    case kExprI64SConvertF64:
-    case kExprI64UConvertF32:
-    case kExprI64UConvertF64:
-
-      return false;
+int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
+  switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+  case k##name:                     \
+    return MessageTemplate::kWasm##name;
+    FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
     default:
-      return true;
+      return MessageTemplate::kNone;
   }
-#else
-  return true;
-#endif
+}
+
+const char* WasmOpcodes::TrapReasonMessage(TrapReason reason) {
+  return MessageTemplate::TemplateString(TrapReasonToMessageId(reason));
 }
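+// Example of the mapping above: kTrapDivByZero resolves to
+// MessageTemplate::kWasmTrapDivByZero, so TrapReasonMessage(kTrapDivByZero)
+// returns that template's string rather than a hand-written literal.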
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index 52f85aa..764c503 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -49,12 +49,10 @@
 typedef Signature<LocalType> FunctionSig;
 std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
 
-struct WasmName {
-  const char* name;
-  uint32_t length;
-};
+typedef Vector<const char> WasmName;
 
-// TODO(titzer): Renumber all the opcodes to fill in holes.
+typedef int WasmCodePosition;
+const WasmCodePosition kNoCodePosition = -1;
 
 // Control expressions and blocks.
 #define FOREACH_CONTROL_OPCODE(V) \
@@ -62,29 +60,29 @@
   V(Block, 0x01, _)               \
   V(Loop, 0x02, _)                \
   V(If, 0x03, _)                  \
-  V(IfElse, 0x04, _)              \
+  V(Else, 0x04, _)                \
   V(Select, 0x05, _)              \
   V(Br, 0x06, _)                  \
   V(BrIf, 0x07, _)                \
   V(BrTable, 0x08, _)             \
-  V(Return, 0x14, _)              \
-  V(Unreachable, 0x15, _)
+  V(Return, 0x09, _)              \
+  V(Unreachable, 0x0a, _)         \
+  V(End, 0x0F, _)
 
 // Constants, locals, globals, and calls.
 #define FOREACH_MISC_OPCODE(V) \
-  V(I8Const, 0x09, _)          \
-  V(I32Const, 0x0a, _)         \
-  V(I64Const, 0x0b, _)         \
-  V(F64Const, 0x0c, _)         \
-  V(F32Const, 0x0d, _)         \
-  V(GetLocal, 0x0e, _)         \
-  V(SetLocal, 0x0f, _)         \
-  V(LoadGlobal, 0x10, _)       \
-  V(StoreGlobal, 0x11, _)      \
-  V(CallFunction, 0x12, _)     \
-  V(CallIndirect, 0x13, _)     \
-  V(CallImport, 0x1F, _)       \
-  V(DeclLocals, 0x1E, _)
+  V(I32Const, 0x10, _)         \
+  V(I64Const, 0x11, _)         \
+  V(F64Const, 0x12, _)         \
+  V(F32Const, 0x13, _)         \
+  V(GetLocal, 0x14, _)         \
+  V(SetLocal, 0x15, _)         \
+  V(CallFunction, 0x16, _)     \
+  V(CallIndirect, 0x17, _)     \
+  V(CallImport, 0x18, _)       \
+  V(I8Const, 0xcb, _)          \
+  V(LoadGlobal, 0xcc, _)       \
+  V(StoreGlobal, 0xcd, _)
 
 // Load memory expressions.
 #define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -258,29 +256,27 @@
   V(F64Log, 0xc7, d_d)                 \
   V(F64Atan2, 0xc8, d_dd)              \
   V(F64Pow, 0xc9, d_dd)                \
-  V(F64Mod, 0xca, d_dd)
-
-// TODO(titzer): sketch of asm-js compatibility bytecodes
-/* V(I32AsmjsDivS, 0xd0, i_ii)          \ */
-/* V(I32AsmjsDivU, 0xd1, i_ii)          \ */
-/* V(I32AsmjsRemS, 0xd2, i_ii)          \ */
-/* V(I32AsmjsRemU, 0xd3, i_ii)          \ */
-/* V(I32AsmjsLoad8S, 0xd4, i_i)         \ */
-/* V(I32AsmjsLoad8U, 0xd5, i_i)         \ */
-/* V(I32AsmjsLoad16S, 0xd6, i_i)        \ */
-/* V(I32AsmjsLoad16U, 0xd7, i_i)        \ */
-/* V(I32AsmjsLoad, 0xd8, i_i)           \ */
-/* V(F32AsmjsLoad, 0xd9, f_i)           \ */
-/* V(F64AsmjsLoad, 0xda, d_i)           \ */
-/* V(I32AsmjsStore8, 0xdb, i_i)         \ */
-/* V(I32AsmjsStore16, 0xdc, i_i)        \ */
-/* V(I32AsmjsStore, 0xdd, i_ii)         \ */
-/* V(F32AsmjsStore, 0xde, i_if)         \ */
-/* V(F64AsmjsStore, 0xdf, i_id)         \ */
-/* V(I32SAsmjsConvertF32, 0xe0, i_f)    \ */
-/* V(I32UAsmjsConvertF32, 0xe1, i_f)    \ */
-/* V(I32SAsmjsConvertF64, 0xe2, i_d)    \ */
-/* V(I32SAsmjsConvertF64, 0xe3, i_d) */
+  V(F64Mod, 0xca, d_dd)                \
+  V(I32AsmjsDivS, 0xd0, i_ii)          \
+  V(I32AsmjsDivU, 0xd1, i_ii)          \
+  V(I32AsmjsRemS, 0xd2, i_ii)          \
+  V(I32AsmjsRemU, 0xd3, i_ii)          \
+  V(I32AsmjsLoadMem8S, 0xd4, i_i)      \
+  V(I32AsmjsLoadMem8U, 0xd5, i_i)      \
+  V(I32AsmjsLoadMem16S, 0xd6, i_i)     \
+  V(I32AsmjsLoadMem16U, 0xd7, i_i)     \
+  V(I32AsmjsLoadMem, 0xd8, i_i)        \
+  V(F32AsmjsLoadMem, 0xd9, f_i)        \
+  V(F64AsmjsLoadMem, 0xda, d_i)        \
+  V(I32AsmjsStoreMem8, 0xdb, i_ii)     \
+  V(I32AsmjsStoreMem16, 0xdc, i_ii)    \
+  V(I32AsmjsStoreMem, 0xdd, i_ii)      \
+  V(F32AsmjsStoreMem, 0xde, f_if)      \
+  V(F64AsmjsStoreMem, 0xdf, d_id)      \
+  V(I32AsmjsSConvertF32, 0xe0, i_f)    \
+  V(I32AsmjsUConvertF32, 0xe1, i_f)    \
+  V(I32AsmjsSConvertF64, 0xe2, i_d)    \
+  V(I32AsmjsUConvertF64, 0xe3, i_d)
 
 // All opcodes.
 #define FOREACH_OPCODE(V)     \
@@ -330,25 +326,33 @@
 };
 
 // The reason for a trap.
+#define FOREACH_WASM_TRAPREASON(V) \
+  V(TrapUnreachable)          \
+  V(TrapMemOutOfBounds)       \
+  V(TrapDivByZero)            \
+  V(TrapDivUnrepresentable)   \
+  V(TrapRemByZero)            \
+  V(TrapFloatUnrepresentable) \
+  V(TrapFuncInvalid)          \
+  V(TrapFuncSigMismatch)
+
 enum TrapReason {
-  kTrapUnreachable,
-  kTrapMemOutOfBounds,
-  kTrapDivByZero,
-  kTrapDivUnrepresentable,
-  kTrapRemByZero,
-  kTrapFloatUnrepresentable,
-  kTrapFuncInvalid,
-  kTrapFuncSigMismatch,
+#define DECLARE_ENUM(name) k##name,
+  FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
   kTrapCount
+#undef DECLARE_ENUM
 };
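+// DECLARE_ENUM expands the list above to kTrapUnreachable,
+// kTrapMemOutOfBounds, ..., kTrapFuncSigMismatch, followed by kTrapCount,
+// matching the enumerators previously written out by hand.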
 
 // A collection of opcode-related static methods.
 class WasmOpcodes {
  public:
-  static bool IsSupported(WasmOpcode opcode);
   static const char* OpcodeName(WasmOpcode opcode);
+  static const char* ShortOpcodeName(WasmOpcode opcode);
   static FunctionSig* Signature(WasmOpcode opcode);
 
+  static int TrapReasonToMessageId(TrapReason reason);
+  static const char* TrapReasonMessage(TrapReason reason);
+
   static byte MemSize(MachineType type) {
     return 1 << ElementSizeLog2Of(type.representation());
   }
@@ -508,29 +512,6 @@
         return "<unknown>";
     }
   }
-
-  static const char* TrapReasonName(TrapReason reason) {
-    switch (reason) {
-      case kTrapUnreachable:
-        return "unreachable";
-      case kTrapMemOutOfBounds:
-        return "memory access out of bounds";
-      case kTrapDivByZero:
-        return "divide by zero";
-      case kTrapDivUnrepresentable:
-        return "divide result unrepresentable";
-      case kTrapRemByZero:
-        return "remainder by zero";
-      case kTrapFloatUnrepresentable:
-        return "integer result unrepresentable";
-      case kTrapFuncInvalid:
-        return "invalid function";
-      case kTrapFuncSigMismatch:
-        return "function signature mismatch";
-      default:
-        return "<?>";
-    }
-  }
 };
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-result.cc b/src/wasm/wasm-result.cc
index 4fd17ee..3de5812 100644
--- a/src/wasm/wasm-result.cc
+++ b/src/wasm/wasm-result.cc
@@ -28,7 +28,6 @@
   return os;
 }
 
-
 void ErrorThrower::Error(const char* format, ...) {
   if (error_) return;  // only report the first error.
   error_ = true;
diff --git a/src/wasm/wasm-result.h b/src/wasm/wasm-result.h
index 59ab29e..b650c33 100644
--- a/src/wasm/wasm-result.h
+++ b/src/wasm/wasm-result.h
@@ -5,6 +5,7 @@
 #ifndef V8_WASM_RESULT_H_
 #define V8_WASM_RESULT_H_
 
+#include "src/base/compiler-specific.h"
 #include "src/base/smart-pointers.h"
 
 #include "src/globals.h"
@@ -93,13 +94,13 @@
   ErrorThrower(Isolate* isolate, const char* context)
       : isolate_(isolate), context_(context), error_(false) {}
 
-  void Error(const char* fmt, ...);
+  PRINTF_FORMAT(2, 3) void Error(const char* fmt, ...);
 
   template <typename T>
   void Failed(const char* error, Result<T>& result) {
     std::ostringstream str;
     str << error << result;
-    return Error(str.str().c_str());
+    return Error("%s", str.str().c_str());
   }
 
   bool error() const { return error_; }
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index f32f407..0af8f93 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -78,7 +78,8 @@
 void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
   DCHECK(RelocInfo::IsRuntimeEntry(rmode));
   RecordRelocInfo(rmode);
-  emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
+  emitl(static_cast<uint32_t>(
+      entry - isolate()->heap()->memory_allocator()->code_range()->start()));
 }
 
 
@@ -299,7 +300,8 @@
 
 
 Address Assembler::runtime_entry_at(Address pc) {
-  return Memory::int32_at(pc) + isolate()->code_range()->start();
+  return Memory::int32_at(pc) +
+         isolate()->heap()->memory_allocator()->code_range()->start();
 }
 
 // -----------------------------------------------------------------------------
@@ -326,11 +328,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Memory::Address_at(pc_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
@@ -368,21 +365,6 @@
   }
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Memory::Address_at(pc_) = updated_reference;
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
-  }
-}
-
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_at(pc_);
@@ -538,7 +520,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 214b786..5f8fb68 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -114,6 +114,45 @@
       CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
 }
 
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_reference &&
+           updated_reference < new_base + new_size);
+    Memory::Address_at(pc_) = updated_reference;
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Memory::uint32_at(pc_) = updated_size_reference;
+  } else {
+    UNREACHABLE();
+  }
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+  }
+}
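+// Worked example for the address case: with old_base 0x1000, new_base
+// 0x5000, and a recorded reference 0x1240, the patched value is
+// 0x5000 + (0x1240 - 0x1000) = 0x5240. Size references are rebased the same
+// way, as new_size + (old_reference - old_size).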
 
 // -----------------------------------------------------------------------------
 // Implementation of Operand
@@ -563,14 +602,17 @@
   if (is_int8(src.value_)) {
     emit(0x83);
     emit_modrm(subcode, dst);
+    if (!RelocInfo::IsNone(src.rmode_)) {
+      RecordRelocInfo(src.rmode_);
+    }
     emit(src.value_);
   } else if (dst.is(rax)) {
     emit(0x05 | (subcode << 3));
-    emitl(src.value_);
+    emit(src);
   } else {
     emit(0x81);
     emit_modrm(subcode, dst);
-    emitl(src.value_);
+    emit(src);
   }
 }
 
@@ -583,11 +625,14 @@
   if (is_int8(src.value_)) {
     emit(0x83);
     emit_operand(subcode, dst);
+    if (!RelocInfo::IsNone(src.rmode_)) {
+      RecordRelocInfo(src.rmode_);
+    }
     emit(src.value_);
   } else {
     emit(0x81);
     emit_operand(subcode, dst);
-    emitl(src.value_);
+    emit(src);
   }
 }
 
@@ -1484,7 +1529,6 @@
   movq(dst, static_cast<int64_t>(value), rmode);
 }
 
-
 // Loads the ip-relative location of the src label into the target location
 // (as a 32-bit offset sign extended to 64-bit).
 void Assembler::movl(const Operand& dst, Label* src) {
@@ -1909,6 +1953,25 @@
   emit_modrm(src, dst);
 }
 
+void Assembler::xchgb(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  if (!reg.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(reg, op);
+  } else {
+    emit_optional_rex_32(reg, op);
+  }
+  emit(0x86);
+  emit_operand(reg, op);
+}
+
+void Assembler::xchgw(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg, op);
+  emit(0x87);
+  emit_operand(reg, op);
+}
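+// Encoding note: 0x86 is the byte-sized form of XCHG, while xchgw reuses the
+// full-width 0x87 form under the 0x66 operand-size prefix, the standard x86
+// way of selecting a 16-bit operand.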
 
 void Assembler::emit_xchg(Register dst, Register src, int size) {
   EnsureSpace ensure_space(this);
@@ -2032,14 +2095,14 @@
   emit(0x66);
   if (reg.is(rax)) {
     emit(0xA9);
-    emit(mask.value_);
+    emitw(mask.value_);
   } else {
     if (reg.low_bits() == 4) {
       emit_rex_32(reg);
     }
     emit(0xF7);
     emit_modrm(0x0, reg);
-    emit(mask.value_);
+    emitw(mask.value_);
   }
 }
 
@@ -2050,7 +2113,7 @@
   emit_optional_rex_32(rax, op);
   emit(0xF7);
   emit_operand(rax, op);
-  emit(mask.value_);
+  emitw(mask.value_);
 }
 
 void Assembler::testw(const Operand& op, Register reg) {
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e48f358..77a1a57 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -183,6 +183,8 @@
   V(xmm14)                  \
   V(xmm15)
 
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
   V(xmm1)                               \
   V(xmm2)                               \
@@ -200,8 +202,7 @@
   V(xmm14)                              \
   V(xmm15)
 
-
-struct DoubleRegister {
+struct XMMRegister {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
     DOUBLE_REGISTERS(REGISTER_CODE)
@@ -212,15 +213,15 @@
 
   static const int kMaxNumRegisters = Code::kAfterLast;
 
-  static DoubleRegister from_code(int code) {
-    DoubleRegister result = {code};
+  static XMMRegister from_code(int code) {
+    XMMRegister result = {code};
     return result;
   }
 
   const char* ToString();
   bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
-  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+  bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
     DCHECK(is_valid());
     return reg_code;
@@ -238,6 +239,11 @@
   int reg_code;
 };
 
+typedef XMMRegister FloatRegister;
+
+typedef XMMRegister DoubleRegister;
+
+typedef XMMRegister Simd128Register;
 
 #define DECLARE_REGISTER(R) \
   const DoubleRegister R = {DoubleRegister::kCode_##R};
@@ -245,11 +251,6 @@
 #undef DECLARE_REGISTER
 const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
 
-
-typedef DoubleRegister XMMRegister;
-
-typedef DoubleRegister Simd128Register;
-
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -334,6 +335,8 @@
 class Immediate BASE_EMBEDDED {
  public:
   explicit Immediate(int32_t value) : value_(value) {}
+  explicit Immediate(int32_t value, RelocInfo::Mode rmode)
+      : value_(value), rmode_(rmode) {}
   explicit Immediate(Smi* value) {
     DCHECK(SmiValuesAre31Bits());  // Only available for 31-bit SMI.
     value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
@@ -341,6 +344,7 @@
 
  private:
   int32_t value_;
+  RelocInfo::Mode rmode_ = RelocInfo::NONE32;
 
   friend class Assembler;
 };
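+// Sketch of an assumed call site for the reloc-carrying constructor:
+//   Immediate imm(mem_size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+//   __ cmpl(dst, imm);  // emit(Immediate) records the reloc info
+// so the embedded value can later be patched by
+// RelocInfo::update_wasm_memory_reference.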
@@ -784,6 +788,9 @@
   void decb(Register dst);
   void decb(const Operand& dst);
 
+  void xchgb(Register reg, const Operand& op);
+  void xchgw(Register reg, const Operand& op);
+
   // Sign-extends rax into rdx:rax.
   void cqo();
   // Sign-extends eax into edx:eax.
@@ -1689,7 +1696,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                           ConstantPoolEntry::Access access,
@@ -1755,7 +1762,12 @@
                                RelocInfo::Mode rmode,
                                TypeFeedbackId ast_id = TypeFeedbackId::None());
   inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
-  void emit(Immediate x) { emitl(x.value_); }
+  void emit(Immediate x) {
+    if (!RelocInfo::IsNone(x.rmode_)) {
+      RecordRelocInfo(x.rmode_);
+    }
+    emitl(x.value_);
+  }
 
   // Emits a REX prefix that encodes a 64-bit operand size and
   // the top bit of both register codes.
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 3163783..419ee0f 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -185,16 +185,9 @@
     __ j(greater_equal, &loop);
 
     // Call the function.
-    if (is_api_function) {
-      __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ Call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(rax);
-      __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+    ParameterCount actual(rax);
+    __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -464,6 +457,146 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : the value to pass to the generator
+  //  -- rbx    : the JSGeneratorObject to resume
+  //  -- rdx    : the resume mode (tagged)
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(rbx);
+
+  // Store input value into generator object.
+  __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOffset), rax);
+  __ RecordWriteField(rbx, JSGeneratorObject::kInputOffset, rax, rcx,
+                      kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
+
+  // Load suspended function and context.
+  __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+  __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  Operand step_in_enabled_operand = masm->ExternalOperand(step_in_enabled);
+  __ cmpb(step_in_enabled_operand, Immediate(0));
+  __ j(equal, &skip_flooding);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(rbx);
+    __ Push(rdx);
+    __ Push(rdi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(rdx);
+    __ Pop(rbx);
+    __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Pop return address.
+  __ PopReturnAddressTo(rax);
+
+  // Push receiver.
+  __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+
+  // ----------- S t a t e -------------
+  //  -- rax    : return address
+  //  -- rbx    : the JSGeneratorObject to resume
+  //  -- rdx    : the resume mode (tagged)
+  //  -- rdi    : generator function
+  //  -- rsi    : generator context
+  //  -- rsp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadSharedFunctionInfoSpecialField(
+      rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ subl(rcx, Immediate(1));
+    __ j(carry, &done_loop, Label::kNear);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+  __ j(not_equal, &old_generator);
+
+  // New-style (ignition/turbofan) generator object.
+  {
+    __ PushReturnAddressFrom(rax);
+    __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadSharedFunctionInfoSpecialField(
+        rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ movp(rdx, rbx);
+    __ jmp(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Old-style (full-codegen) generator object.
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushReturnAddressFrom(rax);  // Return address.
+    __ Push(rbp);                   // Caller's frame pointer.
+    __ Move(rbp, rsp);
+    __ Push(rsi);  // Callee's context.
+    __ Push(rdi);  // Callee's JS Function.
+
+    // Restore the operand stack.
+    __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+    __ SmiToInteger32(rax, FieldOperand(rsi, FixedArray::kLengthOffset));
+    {
+      Label done_loop, loop;
+      __ Set(rcx, 0);
+      __ bind(&loop);
+      __ cmpl(rcx, rax);
+      __ j(equal, &done_loop, Label::kNear);
+      __ Push(
+          FieldOperand(rsi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+      __ addl(rcx, Immediate(1));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ LoadRoot(FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset),
+                Heap::kEmptyFixedArrayRootIndex);
+
+    // Restore context.
+    __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+
+    // Resume the generator function at the continuation.
+    __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+    __ SmiToInteger64(
+        rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
+    __ leap(rdx, FieldOperand(rdx, rcx, times_1, Code::kHeaderSize));
+    __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
+            Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+    __ movp(rax, rbx);  // Continuation expects generator object in rax.
+    __ jmp(rdx);
+  }
+}
 
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
@@ -480,6 +613,8 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
@@ -490,10 +625,9 @@
   __ Push(rdi);  // Callee's JS function.
   __ Push(rdx);  // Callee's new target.
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into edi (InterpreterBytecodeRegister).
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-
   Label load_debug_bytecode_array, bytecode_array_loaded;
   DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
   __ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
@@ -503,18 +637,26 @@
           FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
 
+  // Check function data field is actually a BytecodeArray object.
+  Label bytecode_array_not_present;
+  __ CompareRoot(kInterpreterBytecodeArrayRegister,
+                 Heap::kUndefinedValueRootIndex);
+  __ j(equal, &bytecode_array_not_present);
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
                      rax);
     __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
-  // Push bytecode array.
+  // Load initial bytecode offset.
+  __ movp(kInterpreterBytecodeOffsetRegister,
+          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+  // Push bytecode array and Smi tagged bytecode offset.
   __ Push(kInterpreterBytecodeArrayRegister);
-  // Push zero for bytecode array offset.
-  __ Push(Immediate(0));
+  __ Integer32ToSmi(rcx, kInterpreterBytecodeOffsetRegister);
+  __ Push(rcx);
 
   // Allocate the local and temporary register file on the stack.
   {
@@ -545,19 +687,8 @@
     __ j(greater_equal, &loop_header, Label::kNear);
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load accumulator, register file, bytecode offset, dispatch table into
-  // registers.
+  // Load accumulator and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ movp(kInterpreterRegisterFileRegister, rbp);
-  __ addp(kInterpreterRegisterFileRegister,
-          Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ movp(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
   __ Move(
       kInterpreterDispatchTableRegister,
       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
@@ -567,13 +698,23 @@
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
   __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
                        times_pointer_size, 0));
-  // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
-  // and header removal.
-  __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(rbx);
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+  // The return value is in rax.
+
+  // Get the arguments + receiver count.
+  __ movp(rbx, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ movl(rbx, FieldOperand(rbx, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments and return.
+  __ PopReturnAddressTo(rcx);
+  __ addp(rsp, rbx);
+  __ PushReturnAddressFrom(rcx);
+  __ ret(0);
 
   // Load debug copy of the bytecode array.
   __ bind(&load_debug_bytecode_array);
@@ -582,31 +723,20 @@
   __ movp(kInterpreterBytecodeArrayRegister,
           FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ jmp(&bytecode_array_loaded);
+
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
+  __ leave();  // Leave the frame so we can tail call.
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
+  __ RecordWriteCodeEntryField(rdi, rcx, r15);
+  __ jmp(rcx);
 }
 
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in rax.
-
-  // Leave the frame (also dropping the register file).
-  __ leave();
-
-  // Drop receiver + arguments and return.
-  __ movl(rbx, FieldOperand(kInterpreterBytecodeArrayRegister,
-                            BytecodeArray::kParameterSizeOffset));
-  __ PopReturnAddressTo(rcx);
-  __ addp(rsp, rbx);
-  __ PushReturnAddressFrom(rcx);
-  __ ret(0);
-}
-
-
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                          bool push_receiver) {
   // ----------- S t a t e -------------
@@ -637,7 +767,6 @@
   __ j(greater, &loop_header, Label::kNear);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -661,7 +790,6 @@
           RelocInfo::CODE_TARGET);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -689,26 +817,25 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ Move(rbx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
+                         Code::kHeaderSize - kHeapObjectTag));
+  __ Push(rbx);
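+  // The pushed address is the instruction just past the "call rbx" in
+  // InterpreterEntryTrampoline (the pc_offset recorded there via
+  // SetInterpreterEntryReturnPCOffset), so returning from a bytecode handler
+  // resumes the trampoline's frame-teardown path.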
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register and dispatch table register.
-  __ movp(kInterpreterRegisterFileRegister, rbp);
-  __ addp(kInterpreterRegisterFileRegister,
-          Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Initialize dispatch table register.
   __ Move(
       kInterpreterDispatchTableRegister,
       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
 
-  // Get the context from the frame.
-  __ movp(kContextRegister,
-          Operand(kInterpreterRegisterFileRegister,
-                  InterpreterFrameConstants::kContextFromRegisterPointer));
-
   // Get the bytecode array pointer from the frame.
-  __ movp(
-      kInterpreterBytecodeArrayRegister,
-      Operand(kInterpreterRegisterFileRegister,
-              InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+  __ movp(kInterpreterBytecodeArrayRegister,
+          Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -719,10 +846,8 @@
   }
 
   // Get the target bytecode offset from the frame.
-  __ movp(
-      kInterpreterBytecodeOffsetRegister,
-      Operand(kInterpreterRegisterFileRegister,
-              InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+  __ movp(kInterpreterBytecodeOffsetRegister,
+          Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
                     kInterpreterBytecodeOffsetRegister);
 
@@ -731,66 +856,144 @@
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
   __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
                        times_pointer_size, 0));
-  __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(rbx);
 }
 
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ Push(Smi::FromInt(static_cast<int>(type)));
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and and pop the
-  // accumulator value into the accumulator register and push PC at top
-  // of stack (to simulate initial call to bytecode handler in interpreter entry
-  // trampoline).
-  __ Pop(rbx);
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-  __ Push(rbx);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argument count (preserved for callee)
+  //  -- rdx : new target (preserved for callee)
+  //  -- rdi : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = rdi;
+  Register map = r8;
+  Register index = r9;
+  __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
+  __ cmpl(index, Immediate(2));
+  __ j(less, &gotta_call_runtime);
+
+  // Find literals.
+  // r14 : native context
+  // r9  : length / index
+  // r8  : optimized code map
+  // rdx : new target
+  // rdi : closure
+  Register native_context = r14;
+  __ movp(native_context, NativeContextOperand());
+
+  __ bind(&loop_top);
+  // Native context match?
+  Register temp = r11;
+  __ movp(temp, FieldOperand(map, index, times_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousContext));
+  __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ cmpp(temp, native_context);
+  __ j(not_equal, &loop_bottom);
+  // OSR id set to none?
+  __ movp(temp, FieldOperand(map, index, times_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  __ SmiToInteger32(temp, temp);
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmpl(temp, Immediate(bailout_id));
+  __ j(not_equal, &loop_bottom);
+  // Literals available?
+  __ movp(temp, FieldOperand(map, index, times_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
+  __ movp(r15, index);
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = rcx;
+  __ movp(entry, FieldOperand(map, index, times_pointer_size,
+                              SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
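+  // entry held a tagged Code pointer; FieldOperand compensates for the tag,
+  // so entry now points at the first instruction past the Code header.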
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, r15);
+
+  // Link the closure into the optimized function list.
+  // rcx : code entry (entry)
+  // r14 : native context
+  // rdx : new target
+  // rdi : closure
+  __ movp(rbx,
+          ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, r15,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+          closure);
+  // Save closure before the write barrier.
+  __ movp(rbx, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, r15,
+                            kDontSaveFPRegs);
+  __ movp(closure, rbx);
+  __ jmp(entry);
+
+  __ bind(&loop_bottom);
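+  // Step back one full entry per iteration; the map is scanned from its end
+  // towards slot 1, so once index reaches 1 only the leading slot remains.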
+  __ subl(index, Immediate(SharedFunctionInfo::kEntryLength));
+  __ cmpl(index, Immediate(1));
+  __ j(greater, &loop_top);
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context-free optimized code map entry.
+  __ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
+                                       SharedFunctionInfo::kSharedCodeIndex));
+  __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  // Is the full code valid?
+  __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
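+  // Until real code has been compiled, the shared code is a builtin
+  // (typically CompileLazy itself), which must not be installed here.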
+  __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
+  __ andl(rbx, Immediate(Code::KindField::kMask));
+  __ shrl(rbx, Immediate(Code::KindField::kShift));
+  __ cmpl(rbx, Immediate(Code::BUILTIN));
+  __ j(equal, &gotta_call_runtime);
+  // Yes, install the full code.
+  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, r15);
+  __ jmp(entry);
+
+  __ bind(&gotta_call_runtime);
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
@@ -929,13 +1132,16 @@
 
   // Switch on the state.
   Label not_no_registers, not_tos_rax;
-  __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+  __ cmpp(kScratchRegister,
+          Immediate(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
   __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
   __ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
-  __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+  __ cmpp(kScratchRegister,
+          Immediate(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
   __ j(not_equal, &not_tos_rax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, rax.
 
@@ -1010,29 +1216,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax     : argc
-  //  -- rsp[0]  : return address
-  //  -- rsp[8]  : first argument (left-hand side)
-  //  -- rsp[16] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ movp(InstanceOfDescriptor::LeftRegister(),
-            Operand(rbp, 2 * kPointerSize));  // Load left-hand side.
-    __ movp(InstanceOfDescriptor::RightRegister(),
-            Operand(rbp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ ret(2 * kPointerSize);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : argc
@@ -1789,6 +1972,34 @@
   __ PushReturnAddressFrom(rcx);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdx    : requested object size (untagged)
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ Integer32ToSmi(rdx, rdx);
+  __ PopReturnAddressTo(rcx);
+  __ Push(rdx);
+  __ PushReturnAddressFrom(rcx);
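+  // This builtin is entered without a JS context; pass a dummy Smi zero
+  // context to the runtime call below.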
+  __ Move(rsi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdx    : requested object size (untagged)
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ Integer32ToSmi(rdx, rdx);
+  __ PopReturnAddressTo(rcx);
+  __ Push(rdx);
+  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ PushReturnAddressFrom(rcx);
+  __ Move(rsi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index e737801..602d3a0 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -53,12 +53,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -71,11 +65,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -2013,125 +2002,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = rdx;              // Object (lhs).
-  Register const function = rax;            // Function (rhs).
-  Register const object_map = rcx;          // Map of {object}.
-  Register const function_map = r8;         // Map of {function}.
-  Register const function_prototype = rdi;  // Prototype of {function}.
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
-  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ j(not_equal, &fast_case, Label::kNear);
-  __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-  __ j(not_equal, &fast_case, Label::kNear);
-  __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
-  __ ret(0);
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
-  __ j(not_equal, &slow_case);
-  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
-  __ ret(0);
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
-  __ j(not_equal, &slow_case);
-
-  // Go to the runtime if the function is not a constructor.
-  __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsConstructor));
-  __ j(zero, &slow_case);
-
-  // Ensure that {function} has an instance prototype.
-  __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
-           Immediate(1 << Map::kHasNonInstancePrototype));
-  __ j(not_zero, &slow_case);
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ movp(function_prototype,
-          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  Register const function_prototype_map = kScratchRegister;
-  __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
-  __ j(not_equal, &function_prototype_valid, Label::kNear);
-  __ movp(function_prototype,
-          FieldOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Label done, loop, fast_runtime_fallback;
-  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
-  __ bind(&loop);
-
-  __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &fast_runtime_fallback, Label::kNear);
-  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
-  __ j(equal, &fast_runtime_fallback, Label::kNear);
-
-  __ movp(object, FieldOperand(object_map, Map::kPrototypeOffset));
-  __ cmpp(object, function_prototype);
-  __ j(equal, &done, Label::kNear);
-  __ CompareRoot(object, Heap::kNullValueRootIndex);
-  __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
-  __ j(not_equal, &loop);
-  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
-  __ bind(&done);
-  __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
-  __ ret(0);
-
-  // Found Proxy or access check needed: Call the runtime.
-  __ bind(&fast_runtime_fallback);
-  __ PopReturnAddressTo(kScratchRegister);
-  __ Push(object);
-  __ Push(function_prototype);
-  __ PushReturnAddressFrom(kScratchRegister);
-  // Invalidate the instanceof cache.
-  __ Move(rax, Smi::FromInt(0));
-  __ StoreRoot(rax, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ PopReturnAddressTo(kScratchRegister);
-  __ Push(object);
-  __ Push(function);
-  __ PushReturnAddressFrom(kScratchRegister);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator
 
@@ -3735,8 +3605,8 @@
   __ bind(&not_array);
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ j(not_equal, &miss);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
       masm, Code::LOAD_IC, code_flags, receiver, name, feedback, no_reg);
 
@@ -3877,8 +3747,8 @@
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ j(not_equal, &miss);
 
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
                                                receiver, key, feedback, no_reg);
 
@@ -4449,15 +4319,15 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ movp(Operand(rax, JSObject::kMapOffset), rcx);
+  __ movp(FieldOperand(rax, JSObject::kMapOffset), rcx);
   __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
-  __ movp(Operand(rax, JSObject::kPropertiesOffset), rbx);
-  __ movp(Operand(rax, JSObject::kElementsOffset), rbx);
+  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ leap(rbx, Operand(rax, JSObject::kHeaderSize));
+  __ leap(rbx, FieldOperand(rax, JSObject::kHeaderSize));
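+  // rax carries the heap object tag from allocation, so the stores above and
+  // this lea go through FieldOperand, which compensates for the tag.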
 
   // ----------- S t a t e -------------
-  //  -- rax    : result (untagged)
+  //  -- rax    : result (tagged)
   //  -- rbx    : result fields (untagged)
   //  -- rdi    : result end (untagged)
   //  -- rcx    : initial map
@@ -4475,10 +4345,6 @@
   {
     // Initialize all in-object fields with undefined.
     __ InitializeFieldsWithFiller(rbx, rdi, r11);
-
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ incp(rax);
     __ Ret();
   }
   __ bind(&slack_tracking);
@@ -4498,10 +4364,6 @@
     __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
     __ InitializeFieldsWithFiller(rdx, rdi, r11);
 
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ incp(rax);
-
     // Check if we can finalize the instance size.
     Label finalize;
     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4532,10 +4394,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(rcx);
   }
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ decp(rax);
   __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
   __ leap(rdi, Operand(rax, rbx, times_pointer_size, 0));
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ decp(rdi);  // Remove the tag from the end address.
   __ jmp(&done_allocate);
 
   // Fall back to %NewObject.
@@ -4557,19 +4419,19 @@
   // -----------------------------------
   __ AssertFunction(rdi);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make rdx point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ movp(rdx, rbp);
-    __ jmp(&loop_entry, Label::kNear);
-    __ bind(&loop);
+  // Make rdx point to the JavaScript frame.
+  __ movp(rdx, rbp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
-    __ j(not_equal, &loop);
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have rest parameters (only possible if we have an
@@ -4601,7 +4463,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the rest parameter array in rax.
@@ -4642,7 +4504,7 @@
     Label allocate, done_allocate;
     __ leal(rcx, Operand(rax, times_pointer_size,
                          JSArray::kSize + FixedArray::kHeaderSize));
-    __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+    __ Allocate(rcx, rdx, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Compute the arguments.length in rdi.
@@ -4709,11 +4571,26 @@
   // -----------------------------------
   __ AssertFunction(rdi);
 
+  // Make r9 point to the JavaScript frame.
+  __ movp(r9, rbp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ movp(r9, Operand(r9, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ cmpp(rdi, Operand(r9, StandardFrameConstants::kFunctionOffset));
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
   __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ LoadSharedFunctionInfoSpecialField(
       rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
-  __ leap(rdx, Operand(rbp, rcx, times_pointer_size,
+  __ leap(rdx, Operand(r9, rcx, times_pointer_size,
                        StandardFrameConstants::kCallerSPOffset));
   __ Integer32ToSmi(rcx, rcx);
 
@@ -4721,6 +4598,7 @@
   // rdx : parameters pointer
   // rdi : function
   // rsp[0] : return address
+  // r9  : JavaScript frame pointer.
   // Registers used over the whole function:
   //  rbx: the mapped parameter count (untagged)
   //  rax: the allocated object (tagged).
@@ -4731,7 +4609,7 @@
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movp(rax, Operand(r9, StandardFrameConstants::kCallerFPOffset));
   __ movp(r8, Operand(rax, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor_frame);
@@ -4774,7 +4652,7 @@
   __ addp(r8, Immediate(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
+  __ Allocate(r8, rax, r9, no_reg, &runtime, NO_ALLOCATION_FLAGS);
 
   // rax = address of new object(s) (tagged)
   // r11 = argument count (untagged)
@@ -4927,19 +4805,19 @@
   // -----------------------------------
   __ AssertFunction(rdi);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make rdx point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ movp(rdx, rbp);
-    __ jmp(&loop_entry, Label::kNear);
-    __ bind(&loop);
+  // Make rdx point to the JavaScript frame.
+  __ movp(rdx, rbp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
-    __ j(not_equal, &loop);
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -4978,7 +4856,7 @@
   Label allocate, done_allocate;
   __ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
                                                     FixedArray::kHeaderSize));
-  __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+  __ Allocate(rcx, rdx, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Compute the arguments.length in rdi.
@@ -5404,10 +5282,14 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
 
   __ PopReturnAddressTo(return_address);
 
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+
   // context save
   __ Push(context);
 
@@ -5441,7 +5323,7 @@
 
   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
-  const int kApiStackSpace = 4;
+  const int kApiStackSpace = 3;
 
   PrepareCallApiFunction(masm, kApiStackSpace);
 
@@ -5453,8 +5335,6 @@
   __ movp(StackSpaceOperand(1), scratch);
   // FunctionCallbackInfo::length_.
   __ Set(StackSpaceOperand(2), argc);
-  // FunctionCallbackInfo::is_construct_call_.
-  __ Set(StackSpaceOperand(3), 0);
 
 #if defined(__MINGW64__) || defined(_WIN64)
   Register arguments_arg = rcx;
@@ -5479,11 +5359,11 @@
                                        ARGUMENTS_DONT_CONTAIN_RECEIVER);
   Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
       FCA::kArgsLength - FCA::kContextSaveIndex);
-  Operand is_construct_call_operand = StackSpaceOperand(3);
+  Operand length_operand = StackSpaceOperand(2);
   Operand return_value_operand = args_from_rbp.GetArgumentOperand(
       this->is_store() ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
   int stack_space = 0;
-  Operand* stack_space_operand = &is_construct_call_operand;
+  Operand* stack_space_operand = &length_operand;
   stack_space = argc + FCA::kArgsLength + 1;
   stack_space_operand = nullptr;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
@@ -5493,14 +5373,6 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rsp[0]                          : return address
-  //  -- rsp[8]                          : name
-  //  -- rsp[16 .. (16 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- r8                              : api_function_address
-  // -----------------------------------
-
 #if defined(__MINGW64__) || defined(_WIN64)
   Register getter_arg = r8;
   Register accessor_info_arg = rdx;
@@ -5510,9 +5382,36 @@
   Register accessor_info_arg = rsi;
   Register name_arg = rdi;
 #endif
-  Register api_function_address = ApiGetterDescriptor::function_address();
-  DCHECK(api_function_address.is(r8));
+  Register api_function_address = r8;
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
   Register scratch = rax;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame to make the GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  // Insert additional parameters into the stack frame above return address.
+  __ PopReturnAddressTo(scratch);
+  __ Push(receiver);
+  __ Push(FieldOperand(callback, AccessorInfo::kDataOffset));
+  __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+  __ Push(kScratchRegister);  // return value
+  __ Push(kScratchRegister);  // return value default
+  __ PushAddress(ExternalReference::isolate_address(isolate()));
+  __ Push(holder);
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Push(FieldOperand(callback, AccessorInfo::kNameOffset));
+  __ PushReturnAddressFrom(scratch);
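+  // The stack now holds the PropertyCallbackInfo::args_ array plus the name:
+  //   rsp[0]  : return address
+  //   rsp[8]  : name
+  //   rsp[16] : should_throw_on_error (index 0)
+  //   rsp[24] : holder                (index 1)
+  //   rsp[32] : isolate               (index 2)
+  //   rsp[40] : return value default  (index 3)
+  //   rsp[48] : return value          (index 4)
+  //   rsp[56] : data                  (index 5)
+  //   rsp[64] : receiver              (index 6)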
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5539,8 +5438,11 @@
 
   // It's okay if api_function_address == getter_arg
   // but not accessor_info_arg or name_arg
-  DCHECK(!api_function_address.is(accessor_info_arg) &&
-         !api_function_address.is(name_arg));
+  DCHECK(!api_function_address.is(accessor_info_arg));
+  DCHECK(!api_function_address.is(name_arg));
+  __ movp(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+  __ movp(api_function_address,
+          FieldOperand(scratch, Foreign::kForeignAddressOffset));
 
   // +3 is to skip prolog, return address and name handle.
   Operand return_value_operand(
@@ -5550,7 +5452,6 @@
                            NULL);
 }
 
-
 #undef __
 
 }  // namespace internal
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 33e987e..114cbdc 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -204,7 +204,7 @@
   // Allocate new backing store.
   __ bind(&new_backing_store);
   __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
-  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
+  __ Allocate(rdi, r14, r11, r15, fail, NO_ALLOCATION_FLAGS);
   // Set backing store's map
   __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
   __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
@@ -296,7 +296,7 @@
   // r8 : source FixedDoubleArray
   // r9 : number of elements
   __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
-  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+  __ Allocate(rdi, r11, r14, r15, &gc_required, NO_ALLOCATION_FLAGS);
   // r11: destination FixedArray
   __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
   __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index a9532dc..7126b89 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -8,6 +8,7 @@
 
 #if V8_TARGET_ARCH_X64
 
+#include "src/base/compiler-specific.h"
 #include "src/base/lazy-instance.h"
 #include "src/disasm.h"
 
@@ -359,7 +360,7 @@
   bool vex_128() {
     DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
     byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
-    return (checked & 4) != 1;
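+    // VEX.L is bit 2 of the checked byte; 128-bit encodings have it cleared,
+    // so this is a mask test against zero.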
+    return (checked & 4) == 0;
   }
 
   bool vex_none() {
@@ -479,7 +480,7 @@
   int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
   int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
   int AVXInstruction(byte* data);
-  void AppendToBuffer(const char* format, ...);
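+  // PRINTF_FORMAT(2, 3): the format string is parameter 2 and the varargs
+  // start at parameter 3 (parameter 1 is the implicit |this|), letting the
+  // compiler type-check AppendToBuffer's format arguments.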
+  PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
 
   void UnimplementedInstruction() {
     if (abort_on_unimplemented_) {
@@ -618,7 +619,7 @@
       value = 0;  // Initialize variables on all paths to satisfy the compiler.
       count = 0;
   }
-  AppendToBuffer("%" V8_PTR_PREFIX "x", value);
+  AppendToBuffer("%" PRIx64, value);
   return count;
 }
 
@@ -1999,7 +2000,7 @@
           if (rex_w()) AppendToBuffer("REX.W ");
           AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
         } else {
-          AppendToBuffer("%s", idesc.mnem, operand_size_code());
+          AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
         }
         data++;
         break;
@@ -2141,9 +2142,11 @@
           default:
             mnem = "???";
         }
-        AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
-                       mnem,
-                       operand_size_code());
+        if (regop <= 1) {
+          AppendToBuffer("%s%c ", mnem, operand_size_code());
+        } else {
+          AppendToBuffer("%s ", mnem);
+        }
         data += PrintRightOperand(data);
       }
         break;
@@ -2334,9 +2337,7 @@
           default:
             UNREACHABLE();
         }
-        AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x",
-                       operand_size_code(),
-                       value);
+        AppendToBuffer("test%c rax,0x%" PRIx64, operand_size_code(), value);
         break;
       }
       case 0xD1:  // fall through
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index b10b522..e1e7f9c 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -46,16 +46,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return rdx; }
-const Register InstanceOfDescriptor::RightRegister() { return rax; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return rdx; }
 const Register StringCompareDescriptor::RightRegister() { return rax; }
 
-
-const Register ApiGetterDescriptor::function_address() { return r8; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
 
 const Register MathPowTaggedDescriptor::exponent() { return rdx; }
 
@@ -68,6 +63,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return rax; }
+const Register HasPropertyDescriptor::KeyRegister() { return rbx; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -243,13 +240,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // register state
+  // rax -- number of arguments
+  // rdi -- function
+  // rbx -- allocation site with elements kind
+  Register registers[] = {rdi, rbx, rax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -313,6 +313,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -373,9 +378,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
-      kInterpreterDispatchTableRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -410,6 +414,16 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rax,  // the value to pass to the generator
+      rbx,  // the JSGeneratorObject to resume
+      rdx   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 566091d..2efb529 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -489,7 +489,7 @@
   // easier.
   DCHECK(js_function.is(rdi));
   DCHECK(code_entry.is(rcx));
-  DCHECK(scratch.is(rax));
+  DCHECK(scratch.is(r15));
 
   // Since a code entry (value) is always in old space, we don't need to update
   // remembered set. If incremental marking is off, there is nothing for us to
@@ -537,13 +537,13 @@
     DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
 
     movp(arg_reg_1, js_function);  // rcx gets rdi.
-    movp(arg_reg_2, dst);          // rdx gets rax.
+    movp(arg_reg_2, dst);          // rdx gets r15.
   } else {
     // AMD64 calling convention.
     DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
 
     // rdi is already loaded with js_function.
-    movp(arg_reg_2, dst);  // rsi gets rax.
+    movp(arg_reg_2, dst);  // rsi gets r15.
   }
   Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
 
@@ -1116,15 +1116,6 @@
   }
 }
 
-void MacroAssembler::Set(Register dst, int64_t x, RelocInfo::Mode rmode) {
-  if (rmode == RelocInfo::WASM_MEMORY_REFERENCE) {
-    DCHECK(x != 0);
-    movq(dst, x, rmode);
-  } else {
-    DCHECK(RelocInfo::IsNone(rmode));
-  }
-}
-
 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
   if (kPointerSize == kInt64Size) {
     if (is_int32(x)) {
@@ -3970,6 +3961,16 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    testb(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
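+    // CmpObjectType clobbers its map register; |object| doubles as that
+    // register here, so preserve it across the check.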
+    Push(object);
+    CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+    Pop(object);
+    Check(equal, kOperandIsNotAGeneratorObject);
+  }
+}
 
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
@@ -4829,7 +4830,7 @@
     Label aligned;
     testl(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
-    if ((flags & PRETENURE) != 0) {
+    if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
       ExternalReference allocation_limit =
           AllocationUtils::GetAllocationLimitReference(isolate(), flags);
       cmpp(result, ExternalOperand(allocation_limit));
@@ -4872,6 +4873,7 @@
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -4905,23 +4907,19 @@
     movp(top_reg, result);
   }
   addp(top_reg, Immediate(object_size));
-  j(carry, gc_required);
   Operand limit_operand = ExternalOperand(allocation_limit);
   cmpp(top_reg, limit_operand);
   j(above, gc_required);
 
-  // Update allocation top.
-  UpdateAllocationTopHelper(top_reg, scratch, flags);
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    UpdateAllocationTopHelper(top_reg, scratch, flags);
+  }
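+  // For a folding dominator the limit check above covers a whole group of
+  // allocations; FastAllocate bumps the top pointer for each folded
+  // allocation afterwards.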
 
-  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if (tag_result) {
-      subp(result, Immediate(object_size - kHeapObjectTag));
-    } else {
-      subp(result, Immediate(object_size));
-    }
-  } else if (tag_result) {
-    // Tag the result if requested.
+    subp(result, Immediate(object_size - kHeapObjectTag));
+  } else {
+    // Tag the result.
     DCHECK(kHeapObjectTag == 1);
     incp(result);
   }
@@ -4937,6 +4935,8 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   leap(result_end, Operand(element_count, element_size, header_size));
   Allocate(result_end, result, result_end, scratch, gc_required, flags);
 }
@@ -4949,6 +4949,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -4971,34 +4972,66 @@
     MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
   }
 
-  // Calculate new top and bail out if new space is exhausted.
   ExternalReference allocation_limit =
       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   if (!object_size.is(result_end)) {
     movp(result_end, object_size);
   }
   addp(result_end, result);
-  j(carry, gc_required);
   Operand limit_operand = ExternalOperand(allocation_limit);
   cmpp(result_end, limit_operand);
   j(above, gc_required);
 
-  // Update allocation top.
-  UpdateAllocationTopHelper(result_end, scratch, flags);
-
-  // Tag the result if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    addp(result, Immediate(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    UpdateAllocationTopHelper(result_end, scratch, flags);
   }
+
+  // Tag the result.
+  addp(result, Immediate(kHeapObjectTag));
 }
 
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register result_end, AllocationFlags flags) {
+  DCHECK(!result.is(result_end));
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, no_reg, flags);
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
+  }
+
+  leap(result_end, Operand(result, object_size));
+
+  UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+  addp(result, Immediate(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, AllocationFlags flags) {
+  DCHECK(!result.is(result_end));
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, no_reg, flags);
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
+  }
+
+  leap(result_end, Operand(result, object_size, times_1, 0));
+
+  UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+  addp(result, Immediate(kHeapObjectTag));
+}
 
 void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch,
                                         Label* gc_required,
                                         MutableMode mode) {
   // Allocate heap number in new space.
-  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   Heap::RootListIndex map_index = mode == MUTABLE
       ? Heap::kMutableHeapNumberMapRootIndex
@@ -5030,14 +5063,8 @@
   }
 
   // Allocate two byte string in new space.
-  Allocate(SeqTwoByteString::kHeaderSize,
-           times_1,
-           scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, result, scratch2,
+           scratch3, gc_required, NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
@@ -5066,14 +5093,8 @@
   }
 
   // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::kHeaderSize,
-           times_1,
-           scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, result, scratch2,
+           scratch3, gc_required, NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
@@ -5091,7 +5112,7 @@
                                         Label* gc_required) {
   // Allocate cons string in new space.
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
@@ -5103,12 +5124,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
@@ -5122,7 +5139,7 @@
                                           Label* gc_required) {
   // Allocate sliced string in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
@@ -5136,7 +5153,7 @@
                                                  Label* gc_required) {
   // Allocate sliced string in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
@@ -5152,7 +5169,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index af3dd03..013d0f1 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -21,8 +21,8 @@
 const Register kReturnRegister2 = {Register::kCode_r8};
 const Register kJSFunctionRegister = {Register::kCode_rdi};
 const Register kContextRegister = {Register::kCode_rsi};
+const Register kAllocateSizeRegister = {Register::kCode_rdx};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r11};
 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
 const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
@@ -818,7 +818,6 @@
 
   // Load a register with a long value as efficiently as possible.
   void Set(Register dst, int64_t x);
-  void Set(Register dst, int64_t x, RelocInfo::Mode rmode);
   void Set(const Operand& dst, intptr_t x);
 
   void Cvtss2sd(XMMRegister dst, XMMRegister src);
@@ -1224,6 +1223,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
@@ -1304,6 +1307,15 @@
                 Label* gc_required,
                 AllocationFlags flags);
 
+  // FastAllocate is currently only used for folded allocations. It just
+  // increments the top pointer without checking against the limit. This is
+  // only valid if it was proved earlier that the allocation will succeed.
+  void FastAllocate(int object_size, Register result, Register result_end,
+                    AllocationFlags flags);
+
+  void FastAllocate(Register object_size, Register result, Register result_end,
+                    AllocationFlags flags);
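+  // Rough sketch of the folding protocol: the dominator performs the single
+  // limit check for the combined size, e.g.
+  //   Allocate(total_size, result, end, scratch, &gc_required,
+  //            ALLOCATION_FOLDING_DOMINATOR);
+  // (leaving the top pointer untouched), and each folded allocation then
+  // advances the top pointer with FastAllocate, relying on that check.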
+
   // Allocate a heap number in new space with undefined value. Returns
   // tagged pointer in result register, or jumps to gc_required if new
   // space is full.
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index 802c80f..ba2a864 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -81,11 +81,6 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
-Address RelocInfo::wasm_memory_reference() {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  return Memory::Address_at(pc_);
-}
-
 Address RelocInfo::target_address_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
@@ -120,21 +115,6 @@
   }
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, size_t old_size, size_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_));
-  DCHECK(old_base <= wasm_memory_reference() &&
-         wasm_memory_reference() < old_base + old_size);
-  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
-  DCHECK(new_base <= updated_reference &&
-         updated_reference < new_base + new_size);
-  Memory::Address_at(pc_) = updated_reference;
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
-  }
-}
-
 Object* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_at(pc_);
@@ -284,7 +264,7 @@
   }
 }
 
-
+template <typename ObjectVisitor>
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index e74d770..5cc783c 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -101,6 +101,42 @@
   return false;
 }
 
+Address RelocInfo::wasm_memory_reference() {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::update_wasm_memory_reference(
+    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+  if (IsWasmMemoryReference(rmode_)) {
+    Address updated_reference;
+    DCHECK(old_base <= wasm_memory_reference() &&
+           wasm_memory_reference() < old_base + old_size);
+    updated_reference = new_base + (wasm_memory_reference() - old_base);
+    DCHECK(new_base <= updated_reference &&
+           updated_reference < new_base + new_size);
+    Memory::Address_at(pc_) = updated_reference;
+  } else if (IsWasmMemorySizeReference(rmode_)) {
+    uint32_t updated_size_reference;
+    DCHECK(wasm_memory_size_reference() <= old_size);
+    updated_size_reference =
+        new_size + (wasm_memory_size_reference() - old_size);
+    DCHECK(updated_size_reference <= new_size);
+    Memory::uint32_at(pc_) = updated_size_reference;
+  } else {
+    UNREACHABLE();
+  }
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+  }
+}
 
 // -----------------------------------------------------------------------------
 // Implementation of Operand
@@ -552,6 +588,18 @@
   emit_operand(dst, src);
 }
 
+void Assembler::xchg_b(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
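+  // 0x86 is xchg r/m8, r8.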
+  EMIT(0x86);
+  emit_operand(reg, op);
+}
+
+void Assembler::xchg_w(Register reg, const Operand& op) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
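+  // The 0x66 operand-size prefix turns the 32-bit xchg (0x87) into its
+  // 16-bit form.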
+  EMIT(0x87);
+  emit_operand(reg, op);
+}
 
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index 96eced9..eaf28e9 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -74,6 +74,8 @@
   V(stX_6)                  \
   V(stX_7)
 
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+
 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
   V(stX_0)                              \
   V(stX_1)                              \
@@ -145,8 +147,7 @@
 #undef DECLARE_REGISTER
 const Register no_reg = {Register::kCode_no_reg};
 
-
-struct DoubleRegister {
+struct X87Register {
   enum Code {
 #define REGISTER_CODE(R) kCode_##R,
     DOUBLE_REGISTERS(REGISTER_CODE)
@@ -158,8 +159,8 @@
   static const int kMaxNumRegisters = Code::kAfterLast;
   static const int kMaxNumAllocatableRegisters = 6;
 
-  static DoubleRegister from_code(int code) {
-    DoubleRegister result = {code};
+  static X87Register from_code(int code) {
+    X87Register result = {code};
     return result;
   }
 
@@ -171,24 +172,26 @@
     return reg_code;
   }
 
-  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+  bool is(X87Register reg) const { return reg_code == reg.reg_code; }
 
   const char* ToString();
 
   int reg_code;
 };
 
+typedef X87Register FloatRegister;
+
+typedef X87Register DoubleRegister;
+
+// TODO(x87) Define SIMD registers.
+typedef X87Register Simd128Register;
+
 #define DECLARE_REGISTER(R) \
   const DoubleRegister R = {DoubleRegister::kCode_##R};
 DOUBLE_REGISTERS(DECLARE_REGISTER)
 #undef DECLARE_REGISTER
 const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
 
-typedef DoubleRegister X87Register;
-
-// TODO(x87) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -648,6 +651,8 @@
   // Exchange
   void xchg(Register dst, Register src);
   void xchg(Register dst, const Operand& src);
+  void xchg_b(Register reg, const Operand& op);
+  void xchg_w(Register reg, const Operand& op);
 
   // Arithmetics
   void adc(Register dst, int32_t imm32);
@@ -958,7 +963,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, int raw_position);
+  void RecordDeoptReason(const int reason, int raw_position, int id);
 
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc
index 9e13172..7018802 100644
--- a/src/x87/builtins-x87.cc
+++ b/src/x87/builtins-x87.cc
@@ -186,16 +186,9 @@
     __ j(greater_equal, &loop);
 
     // Call the function.
-    if (is_api_function) {
-      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      __ call(code, RelocInfo::CODE_TARGET);
-    } else {
-      ParameterCount actual(eax);
-      __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
-                        CheckDebugStepCallWrapper());
-    }
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+                      CheckDebugStepCallWrapper());
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
@@ -392,6 +385,141 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : the value to pass to the generator
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertGeneratorObject(ebx);
+
+  // Store input value into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), eax);
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, eax, ecx,
+                      kDontSaveFPRegs);
+
+  // Store resume mode into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
+
+  // Load suspended function and context.
+  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+  __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+  // Flood function if we are stepping.
+  Label skip_flooding;
+  ExternalReference step_in_enabled =
+      ExternalReference::debug_step_in_enabled_address(masm->isolate());
+  __ cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
+  __ j(equal, &skip_flooding);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(ebx);
+    __ Push(edx);
+    __ Push(edi);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(edx);
+    __ Pop(ebx);
+    __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  }
+  __ bind(&skip_flooding);
+
+  // Pop return address.
+  __ PopReturnAddressTo(eax);
+
+  // Push receiver.
+  __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+  // ----------- S t a t e -------------
+  //  -- eax    : return address
+  //  -- ebx    : the JSGeneratorObject to resume
+  //  -- edx    : the resume mode (tagged)
+  //  -- edi    : generator function
+  //  -- esi    : generator context
+  //  -- esp[0] : generator receiver
+  // -----------------------------------
+
+  // Push holes for arguments to generator function. Since the parser forced
+  // context allocation for any variables in generators, the actual argument
+  // values have already been copied into the context and these dummy values
+  // will never be used.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
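+  // ecx holds the count as a Smi; Smi arithmetic preserves the tag, and the
+  // borrow from subtracting Smi 1 below signals the wrap past zero.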
+  {
+    Label done_loop, loop;
+    __ bind(&loop);
+    __ sub(ecx, Immediate(Smi::FromInt(1)));
+    __ j(carry, &done_loop, Label::kNear);
+    __ PushRoot(Heap::kTheHoleValueRootIndex);
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Dispatch on the kind of generator object.
+  Label old_generator;
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+  __ j(not_equal, &old_generator);
+
+  // New-style (ignition/turbofan) generator object
+  {
+    __ PushReturnAddressFrom(eax);
+    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+    // We abuse new.target both to indicate that this is a resume call and to
+    // pass in the generator object.  In ordinary calls, new.target is always
+    // undefined because generator functions are non-constructable.
+    __ mov(edx, ebx);
+    __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Old-style (full-codegen) generator object
+  __ bind(&old_generator);
+  {
+    // Enter a new JavaScript frame, and initialize its slots as they were when
+    // the generator was suspended.
+    FrameScope scope(masm, StackFrame::MANUAL);
+    __ PushReturnAddressFrom(eax);  // Return address.
+    __ Push(ebp);                   // Caller's frame pointer.
+    __ Move(ebp, esp);
+    __ Push(esi);  // Callee's context.
+    __ Push(edi);  // Callee's JS Function.
+
+    // Restore the operand stack.
+    __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+    {
+      Label done_loop, loop;
+      __ Move(ecx, Smi::FromInt(0));
+      __ bind(&loop);
+      __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+      __ j(equal, &done_loop, Label::kNear);
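+      // ecx is a Smi index, so the half-pointer scale yields full
+      // pointer-sized element offsets.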
+      __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+      __ add(ecx, Immediate(Smi::FromInt(1)));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Reset operand stack so we don't leak.
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
+           Immediate(masm->isolate()->factory()->empty_fixed_array()));
+
+    // Resume the generator function at the continuation.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(ecx);
+    __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
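+    // edx now points at the instruction inside the full-codegen code object
+    // where the generator previously suspended.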
+    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ mov(eax, ebx);  // Continuation expects generator object in eax.
+    __ jmp(edx);
+  }
+}
 
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
@@ -408,6 +536,8 @@
 // The function builds an interpreter frame.  See InterpreterFrameConstants in
 // frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
@@ -418,10 +548,9 @@
   __ push(edi);  // Callee's JS function.
   __ push(edx);  // Callee's new target.
 
-  // Get the bytecode array from the function object and load the pointer to the
-  // first entry into edi (InterpreterBytecodeRegister).
+  // Get the bytecode array from the function object (or from the DebugInfo if
+  // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-
   Label load_debug_bytecode_array, bytecode_array_loaded;
   __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
          Immediate(DebugInfo::uninitialized()));
@@ -430,8 +559,12 @@
          FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
 
+  // Check whether the function data field still holds a BytecodeArray; it is
+  // undefined once the function has been switched to another kind of code.
+  Label bytecode_array_not_present;
+  __ CompareRoot(kInterpreterBytecodeArrayRegister,
+                 Heap::kUndefinedValueRootIndex);
+  __ j(equal, &bytecode_array_not_present);
   if (FLAG_debug_code) {
-    // Check function data field is actually a BytecodeArray object.
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
                      eax);
@@ -440,8 +573,8 @@
 
   // Push bytecode array.
   __ push(kInterpreterBytecodeArrayRegister);
-  // Push zero for bytecode array offset.
-  __ push(Immediate(0));
+  // Push the initial bytecode offset as a Smi; the first bytecode lives at
+  // BytecodeArray::kHeaderSize - kHeapObjectTag from the tagged pointer.
+  __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
 
   // Allocate the local and temporary register file on the stack.
   {
@@ -474,41 +607,36 @@
     __ j(greater_equal, &loop_header);
   }
 
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's prologue:
-  //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Code aging of the BytecodeArray object.
-
-  // Load accumulator, register file, bytecode offset, dispatch table into
-  // registers.
+  // Load accumulator, bytecode offset and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
-  __ mov(kInterpreterRegisterFileRegister, ebp);
-  __ add(kInterpreterRegisterFileRegister,
-         Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ mov(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
-                  masm->isolate())));
-
-  // Push dispatch table as a stack located parameter to the bytecode handler.
-  DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
-  __ push(ebx);
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Dispatch to the first bytecode handler for the function.
-  __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-  __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
-  // Restore undefined_value in accumulator (eax)
-  // TODO(rmcilroy): Remove this once we move the dispatch table back into a
-  // register.
-  __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
-  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
-  // and header removal.
-  __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
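+  // The dispatch table holds raw code entry addresses, so the handler can be
+  // called directly without untagging or skipping the Code header.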
   __ call(ebx);
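+  // Record the pc right after the handler call so that
+  // InterpreterEnterBytecodeDispatch can synthesize a return address into
+  // this trampoline.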
+  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
-  // Even though the first bytecode handler was called, we will never return.
-  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+  // The return value is in eax.
+
+  // Get the arguments + receiver count.
+  __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(ebx, FieldOperand(ebx, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ leave();
+
+  // Drop receiver + arguments and return.
+  __ pop(ecx);
+  __ add(esp, ebx);
+  __ push(ecx);
+  __ ret(0);
 
   // Load debug copy of the bytecode array.
   __ bind(&load_debug_bytecode_array);
@@ -517,31 +645,23 @@
   __ mov(kInterpreterBytecodeArrayRegister,
          FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
   __ jmp(&bytecode_array_loaded);
+
+  // If the bytecode array is no longer present, then the underlying function
+  // has been switched to a different kind of code and we heal the closure by
+  // switching the code entry field over to the new code object as well.
+  __ bind(&bytecode_array_not_present);
+  __ pop(edx);  // Callee's new target.
+  __ pop(edi);  // Callee's JS function.
+  __ pop(esi);  // Callee's context.
+  __ leave();   // Leave the frame so we can tail call.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
+  __ RecordWriteCodeEntryField(edi, ecx, ebx);
+  __ jmp(ecx);
 }
 
-
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
-  // TODO(rmcilroy): List of things not currently dealt with here but done in
-  // fullcodegen's EmitReturnSequence.
-  //  - Supporting FLAG_trace for Runtime::TraceExit.
-  //  - Support profiler (specifically decrementing profiling_counter
-  //    appropriately and calling out to HandleInterrupts if necessary).
-
-  // The return value is in accumulator, which is already in rax.
-
-  // Leave the frame (also dropping the register file).
-  __ leave();
-
-  // Drop receiver + arguments and return.
-  __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
-                           BytecodeArray::kParameterSizeOffset));
-  __ pop(ecx);
-  __ add(esp, ebx);
-  __ push(ecx);
-  __ ret(0);
-}
-
-
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                          Register array_limit) {
   // ----------- S t a t e -------------
@@ -559,7 +679,6 @@
   __ j(greater, &loop_header, Label::kNear);
 }
 
-
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode) {
@@ -628,17 +747,26 @@
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the return address to the correct point in the interpreter entry
+  // trampoline.
+  Smi* interpreter_entry_return_pc_offset(
+      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+  __ LoadHeapObject(ebx,
+                    masm->isolate()->builtins()->InterpreterEntryTrampoline());
+  __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
+                        Code::kHeaderSize - kHeapObjectTag));
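+  // ebx now holds the address in the trampoline just past the bytecode
+  // handler call; push it as the return address.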
+  __ push(ebx);
 
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
-  // Initialize register file register.
-  __ mov(kInterpreterRegisterFileRegister, ebp);
-  __ add(kInterpreterRegisterFileRegister,
-         Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+  // Initialize the dispatch table register.
+  __ mov(kInterpreterDispatchTableRegister,
+         Immediate(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Get the bytecode array pointer from the frame.
   __ mov(kInterpreterBytecodeArrayRegister,
-         Operand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+         Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -649,92 +777,173 @@
   }
 
   // Get the target bytecode offset from the frame.
-  __ mov(
-      kInterpreterBytecodeOffsetRegister,
-      Operand(kInterpreterRegisterFileRegister,
-              InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Push dispatch table as a stack located parameter to the bytecode handler.
-  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
-                  masm->isolate())));
-  DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
-  __ Pop(esi);
-  __ Push(ebx);
-  __ Push(esi);
-
   // Dispatch to the target bytecode.
-  __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+  __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-  __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
-
-  // Get the context from the frame.
-  __ mov(kContextRegister,
-         Operand(kInterpreterRegisterFileRegister,
-                 InterpreterFrameConstants::kContextFromRegisterPointer));
-
-  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
-  // and header removal.
-  __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+                      times_pointer_size, 0));
   __ jmp(ebx);
 }
 
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Pass the deoptimization type to the runtime system.
-    __ Push(Smi::FromInt(static_cast<int>(type)));
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and pop the
-  // accumulator value into the accumulator register and push PC at top
-  // of stack (to simulate initial call to bytecode handler in interpreter entry
-  // trampoline).
-  __ Pop(ebx);
-  __ Drop(1);
-  __ Pop(kInterpreterAccumulatorRegister);
-  __ Push(ebx);
-
-  // Enter the bytecode dispatch.
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
-  // Set the address of the interpreter entry trampoline as a return address.
-  // This simulates the initial call to bytecode handlers in interpreter entry
-  // trampoline. The return will never actually be taken, but our stack walker
-  // uses this address to determine whether a frame is interpreted.
-  __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
-  Generate_EnterBytecodeDispatch(masm);
-}
-
-
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
+  //  -- edx : new target (preserved for callee)
+  //  -- edi : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime, gotta_call_runtime_no_stack;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = edi;
+  Register new_target = edx;
+  Register argument_count = eax;
+
+  __ push(argument_count);
+  __ push(new_target);
+  __ push(closure);
+
+  Register map = argument_count;
+  Register index = ebx;
+  __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
+  __ cmp(index, Immediate(Smi::FromInt(2)));
+  __ j(less, &gotta_call_runtime);
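+  // A map shorter than two elements holds only the context-independent
+  // entry, so there is nothing context-specific to install.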
+
+  // Find literals.
+  // edx : native context
+  // ebx : length / index
+  // eax : optimized code map
+  // stack[0] : closure
+  // stack[4] : new target
+  // stack[8] : argument count
+  Register native_context = edx;
+  __ mov(native_context, NativeContextOperand());
+
+  __ bind(&loop_top);
+  Register temp = edi;
+
+  // Does the native context match?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousContext));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ j(not_equal, &loop_bottom);
+  // OSR id set to none?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
+  __ j(not_equal, &loop_bottom);
+  // Literals available?
+  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+                            SharedFunctionInfo::kOffsetToPreviousLiterals));
+  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+
+  // Save the literals in the closure.
+  __ mov(ecx, Operand(esp, 0));
+  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
+  __ push(index);
+  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ pop(index);
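+  // index doubles as the write barrier's scratch register above, so it was
+  // saved around the RecordWriteField call.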
+
+  // Code available?
+  Register entry = ecx;
+  __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
+                             SharedFunctionInfo::kOffsetToPreviousCachedCode));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  __ pop(closure);
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, eax);
+
+  // Link the closure into the optimized function list.
+  // ecx : code entry
+  // edx : native context
+  // edi : closure
+  __ mov(ebx,
+         ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+         closure);
+  // Save closure before the write barrier.
+  __ mov(ebx, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
+                            kDontSaveFPRegs);
+  __ mov(closure, ebx);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ jmp(entry);
+
+  __ bind(&loop_bottom);
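+  // Step back one entry; entries start at index 1, right after the
+  // context-independent slot.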
+  __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(index, Immediate(Smi::FromInt(1)));
+  __ j(greater, &loop_top);
+
+  // We found neither literals nor code.
+  __ jmp(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  __ pop(closure);
+
+  // Last possibility: check the context-free optimized code map entry.
+  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
+                                      SharedFunctionInfo::kSharedCodeIndex));
+  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ jmp(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  __ pop(new_target);
+  __ pop(argument_count);
+  // Is the full code valid, i.e. real compiled code rather than the
+  // lazy-compile builtin?
+  __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
+  __ and_(ebx, Code::KindField::kMask);
+  __ shr(ebx, Code::KindField::kShift);
+  __ cmp(ebx, Immediate(Code::BUILTIN));
+  __ j(equal, &gotta_call_runtime_no_stack);
+  // Yes, install the full code.
+  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+  __ RecordWriteCodeEntryField(closure, entry, ebx);
+  __ jmp(entry);
+
+  __ bind(&gotta_call_runtime);
+  __ pop(closure);
+  __ pop(new_target);
+  __ pop(argument_count);
+  __ bind(&gotta_call_runtime_no_stack);
+
   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
   GenerateTailCallToReturnedCode(masm,
@@ -876,13 +1085,14 @@
 
   // Switch on the state.
   Label not_no_registers, not_tos_eax;
-  __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
   __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
+  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
   __ mov(eax, Operand(esp, 2 * kPointerSize));
-  __ cmp(ecx, FullCodeGenerator::TOS_REG);
+  __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
   __ j(not_equal, &not_tos_eax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, eax.
 
@@ -958,29 +1168,6 @@
 }
 
 // static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : argc
-  //  -- esp[0] : return address
-  //  -- esp[4] : first argument (left-hand side)
-  //  -- esp[8] : receiver (right-hand side)
-  // -----------------------------------
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ mov(InstanceOfDescriptor::LeftRegister(),
-           Operand(ebp, 2 * kPointerSize));  // Load left-hand side.
-    __ mov(InstanceOfDescriptor::RightRegister(),
-           Operand(ebp, 3 * kPointerSize));  // Load right-hand side.
-    InstanceOfStub stub(masm->isolate(), true);
-    __ CallStub(&stub);
-  }
-
-  // Pop the argument and the receiver.
-  __ ret(2 * kPointerSize);
-}
-
-// static
 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax     : argc
@@ -2385,6 +2572,34 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
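+  // There is no context to pass; Smi zero marks "no context" for the
+  // runtime entry.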
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edx    : requested object size (untagged)
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ SmiTag(edx);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edx);
+  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+  __ PushReturnAddressFrom(ecx);
+  __ Move(esi, Smi::FromInt(0));
+  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 71adfd3..fdb97ee 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -62,12 +62,6 @@
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
@@ -80,11 +74,6 @@
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
@@ -1781,129 +1770,6 @@
 }
 
 
-void InstanceOfStub::Generate(MacroAssembler* masm) {
-  Register const object = edx;                       // Object (lhs).
-  Register const function = eax;                     // Function (rhs).
-  Register const object_map = ecx;                   // Map of {object}.
-  Register const function_map = ebx;                 // Map of {function}.
-  Register const function_prototype = function_map;  // Prototype of {function}.
-  Register const scratch = edi;
-
-  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
-  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
-  // Check if {object} is a smi.
-  Label object_is_smi;
-  __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
-  // Lookup the {function} and the {object} map in the global instanceof cache.
-  // Note: This is safe because we clear the global instanceof cache whenever
-  // we change the prototype of any object.
-  Label fast_case, slow_case;
-  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
-  __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ j(not_equal, &fast_case, Label::kNear);
-  __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
-  __ j(not_equal, &fast_case, Label::kNear);
-  __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
-  __ ret(0);
-
-  // If {object} is a smi we can safely return false if {function} is a JS
-  // function, otherwise we have to miss to the runtime and throw an exception.
-  __ bind(&object_is_smi);
-  __ JumpIfSmi(function, &slow_case);
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
-  __ j(not_equal, &slow_case);
-  __ LoadRoot(eax, Heap::kFalseValueRootIndex);
-  __ ret(0);
-
-  // Fast-case: The {function} must be a valid JSFunction.
-  __ bind(&fast_case);
-  __ JumpIfSmi(function, &slow_case);
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
-  __ j(not_equal, &slow_case);
-
-  // Go to the runtime if the function is not a constructor.
-  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsConstructor));
-  __ j(zero, &slow_case);
-
-  // Ensure that {function} has an instance prototype.
-  __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
-            Immediate(1 << Map::kHasNonInstancePrototype));
-  __ j(not_zero, &slow_case);
-
-  // Get the "prototype" (or initial map) of the {function}.
-  __ mov(function_prototype,
-         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  __ AssertNotSmi(function_prototype);
-
-  // Resolve the prototype if the {function} has an initial map.  Afterwards the
-  // {function_prototype} will be either the JSReceiver prototype object or the
-  // hole value, which means that no instances of the {function} were created so
-  // far and hence we should return false.
-  Label function_prototype_valid;
-  Register const function_prototype_map = scratch;
-  __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
-  __ j(not_equal, &function_prototype_valid, Label::kNear);
-  __ mov(function_prototype,
-         FieldOperand(function_prototype, Map::kPrototypeOffset));
-  __ bind(&function_prototype_valid);
-  __ AssertNotSmi(function_prototype);
-
-  // Update the global instanceof cache with the current {object} map and
-  // {function}.  The cached answer will be set when it is known below.
-  __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
-
-  // Loop through the prototype chain looking for the {function} prototype.
-  // Assume true, and change to false if not found.
-  Label done, loop, fast_runtime_fallback;
-  __ mov(eax, isolate()->factory()->true_value());
-  __ bind(&loop);
-
-  // Check if the object needs to be access checked.
-  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &fast_runtime_fallback, Label::kNear);
-  // Check if the current object is a Proxy.
-  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
-  __ j(equal, &fast_runtime_fallback, Label::kNear);
-
-  __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
-  __ cmp(object, function_prototype);
-  __ j(equal, &done, Label::kNear);
-  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
-  __ cmp(object, isolate()->factory()->null_value());
-  __ j(not_equal, &loop);
-  __ mov(eax, isolate()->factory()->false_value());
-
-  __ bind(&done);
-  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
-  __ ret(0);
-
-  // Found Proxy or access check needed: Call the runtime.
-  __ bind(&fast_runtime_fallback);
-  __ PopReturnAddressTo(scratch);
-  __ Push(object);
-  __ Push(function_prototype);
-  __ PushReturnAddressFrom(scratch);
-  // Invalidate the instanceof cache.
-  __ Move(eax, Immediate(Smi::FromInt(0)));
-  __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
-  __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
-  // Slow-case: Call the %InstanceOf runtime function.
-  __ bind(&slow_case);
-  __ PopReturnAddressTo(scratch);
-  __ Push(object);
-  __ Push(function);
-  __ PushReturnAddressFrom(scratch);
-  __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
-                                         : Runtime::kInstanceOf);
-}
-
-
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator
 
@@ -3534,8 +3400,8 @@
   __ j(not_equal, &miss);
   __ push(slot);
   __ push(vector);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::LOAD_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                                receiver, name, vector, scratch);
   __ pop(vector);
@@ -3795,8 +3661,8 @@
   __ pop(value);
   __ push(slot);
   __ push(vector);
-  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::STORE_IC));
+  Code::Flags code_flags =
+      Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
                                                receiver, key, slot, no_reg);
   __ pop(vector);
@@ -4386,16 +4252,16 @@
   __ bind(&done_allocate);
 
   // Initialize the JSObject fields.
-  __ mov(Operand(eax, JSObject::kMapOffset), ecx);
-  __ mov(Operand(eax, JSObject::kPropertiesOffset),
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
          masm->isolate()->factory()->empty_fixed_array());
-  __ mov(Operand(eax, JSObject::kElementsOffset),
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
          masm->isolate()->factory()->empty_fixed_array());
   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+  __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
 
   // ----------- S t a t e -------------
-  //  -- eax    : result (untagged)
+  //  -- eax    : result (tagged)
   //  -- ebx    : result fields (untagged)
   //  -- edi    : result end (untagged)
   //  -- ecx    : initial map
@@ -4413,10 +4279,6 @@
     // Initialize all in-object fields with undefined.
     __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
     __ InitializeFieldsWithFiller(ebx, edi, edx);
-
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ inc(eax);
     __ Ret();
   }
   __ bind(&slack_tracking);
@@ -4439,10 +4301,6 @@
     __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
     __ InitializeFieldsWithFiller(ebx, edx, edi);
 
-    // Add the object tag to make the JSObject real.
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    __ inc(eax);
-
     // Check if we can finalize the instance size.
     Label finalize;
     STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4473,10 +4331,10 @@
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ Pop(ecx);
   }
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ dec(eax);
   __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
   __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ dec(edi);
   __ jmp(&done_allocate);
 
   // Fall back to %NewObject.
@@ -4497,19 +4355,19 @@
   // -----------------------------------
   __ AssertFunction(edi);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make edx point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mov(edx, ebp);
-    __ jmp(&loop_entry, Label::kNear);
-    __ bind(&loop);
+  // Make edx point to the JavaScript frame.
+  __ mov(edx, ebp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
-    __ j(not_equal, &loop);
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have rest parameters (only possible if we have an
@@ -4539,7 +4397,7 @@
 
     // Allocate an empty rest parameter array.
     Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the rest parameter array in rax.
@@ -4581,7 +4439,7 @@
     Label allocate, done_allocate;
     __ lea(ecx, Operand(eax, times_half_pointer_size,
                         JSArray::kSize + FixedArray::kHeaderSize));
-    __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+    __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in edx.
@@ -4643,35 +4501,50 @@
   // -----------------------------------
   __ AssertFunction(edi);
 
+  // Make ecx point to the JavaScript frame.
+  __ mov(ecx, ebp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
+    __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+  }
+  if (FLAG_debug_code) {
+    Label ok;
+    __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
+    __ bind(&ok);
+  }
+
   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ecx,
-         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx,
+         FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));
 
-  // ecx : number of parameters (tagged)
+  // ebx : number of parameters (tagged)
   // edx : parameters pointer
   // edi : function
+  // ecx : JavaScript frame pointer
   // esp[0] : return address
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // No adaptor, parameter count = argument count.
-  __ mov(ebx, ecx);
-  __ push(ecx);
+  __ mov(ecx, ebx);
+  __ push(ebx);
   __ jmp(&try_allocate, Label::kNear);
 
   // We have an adaptor frame. Patch the parameters pointer.
   __ bind(&adaptor_frame);
-  __ mov(ebx, ecx);
-  __ push(ecx);
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ push(ebx);
+  __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ lea(edx,
          Operand(edx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
@@ -4705,7 +4578,7 @@
   __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
 
   // Do the allocation of all three objects in one go.
-  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+  __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
 
   // eax = address of new object(s) (tagged)
   // ecx = argument count (smi-tagged)
@@ -4883,19 +4756,19 @@
   // -----------------------------------
   __ AssertFunction(edi);
 
-  // For Ignition we need to skip all possible handler/stub frames until
-  // we reach the JavaScript frame for the function (similar to what the
-  // runtime fallback implementation does). So make edx point to that
-  // JavaScript frame.
-  {
-    Label loop, loop_entry;
-    __ mov(edx, ebp);
-    __ jmp(&loop_entry, Label::kNear);
-    __ bind(&loop);
+  // Make edx point to the JavaScript frame.
+  __ mov(edx, ebp);
+  if (skip_stub_frame()) {
+    // For Ignition we need to skip the handler/stub frame to reach the
+    // JavaScript frame for the function.
     __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-    __ bind(&loop_entry);
+  }
+  if (FLAG_debug_code) {
+    Label ok;
     __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
-    __ j(not_equal, &loop);
+    __ j(equal, &ok);
+    __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
+    __ bind(&ok);
   }
 
   // Check if we have an arguments adaptor frame below the function frame.
@@ -4934,7 +4807,7 @@
   __ lea(ecx,
          Operand(eax, times_half_pointer_size,
                  JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+  __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in edx.
@@ -5354,9 +5227,14 @@
   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(FCA::kIsolateIndex == 1);
   STATIC_ASSERT(FCA::kHolderIndex == 0);
-  STATIC_ASSERT(FCA::kArgsLength == 7);
+  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+  STATIC_ASSERT(FCA::kArgsLength == 8);
 
   __ pop(return_address);
+
+  // new target
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+
   // context save.
   __ push(context);
 
@@ -5401,7 +5279,7 @@
 
   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
-  const int kApiStackSpace = 4;
+  const int kApiStackSpace = 3;
 
   PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
 
@@ -5412,8 +5290,6 @@
   __ mov(ApiParameterOperand(3), scratch);
   // FunctionCallbackInfo::length_.
   __ Move(ApiParameterOperand(4), Immediate(argc()));
-  // FunctionCallbackInfo::is_construct_call_.
-  __ Move(ApiParameterOperand(5), Immediate(0));
 
   // v8::InvocationCallback's argument.
   __ lea(scratch, ApiParameterOperand(2));
@@ -5433,8 +5309,8 @@
   }
   Operand return_value_operand(ebp, return_value_offset * kPointerSize);
   int stack_space = 0;
-  Operand is_construct_call_operand = ApiParameterOperand(5);
-  Operand* stack_space_operand = &is_construct_call_operand;
+  Operand length_operand = ApiParameterOperand(4);
+  Operand* stack_space_operand = &length_operand;
   stack_space = argc() + FCA::kArgsLength + 1;
   stack_space_operand = nullptr;
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -5445,14 +5321,34 @@
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- esp[0]                        : return address
-  //  -- esp[4]                        : name
-  //  -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
-  //  -- ...
-  //  -- edx                           : api_function_address
-  // -----------------------------------
-  DCHECK(edx.is(ApiGetterDescriptor::function_address()));
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  Register receiver = ApiGetterDescriptor::ReceiverRegister();
+  Register holder = ApiGetterDescriptor::HolderRegister();
+  Register callback = ApiGetterDescriptor::CallbackRegister();
+  Register scratch = ebx;
+  DCHECK(!AreAliased(receiver, holder, callback, scratch));
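+  // The args_ slots are pushed from the highest index (kThisIndex) down to
+  // index 0, so lower indices end up closer to the stack pointer.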
+
+  __ pop(scratch);  // Pop return address to extend the frame.
+  __ push(receiver);
+  __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
+  __ PushRoot(Heap::kUndefinedValueRootIndex);  // ReturnValue
+  // ReturnValue default value
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+  __ push(Immediate(ExternalReference::isolate_address(isolate())));
+  __ push(holder);
+  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
+  __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
+  __ push(scratch);  // Restore return address.
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5462,9 +5358,6 @@
   // active) in non-GCed stack space.
   const int kApiArgc = 3 + 1;
 
-  Register api_function_address = edx;
-  Register scratch = ebx;
-
   // Load address of v8::PropertyAccessorInfo::args_ array.
   __ lea(scratch, Operand(esp, 2 * kPointerSize));
 
@@ -5474,25 +5367,30 @@
   Operand info_object = ApiParameterOperand(3);
   __ mov(info_object, scratch);
 
+  // Name as handle.
   __ sub(scratch, Immediate(kPointerSize));
-  __ mov(ApiParameterOperand(0), scratch);  // name.
+  __ mov(ApiParameterOperand(0), scratch);
+  // Arguments pointer.
   __ lea(scratch, info_object);
-  __ mov(ApiParameterOperand(1), scratch);  // arguments pointer.
+  __ mov(ApiParameterOperand(1), scratch);
   // Reserve space for optional callback address parameter.
   Operand thunk_last_arg = ApiParameterOperand(2);
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+  Register function_address = edx;
+  __ mov(function_address,
+         FieldOperand(scratch, Foreign::kForeignAddressOffset));
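+  // The js_getter slot holds a Foreign; its payload is the raw address of
+  // the C++ getter invoked below.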
   // +3 is to skip prolog, return address and name handle.
   Operand return_value_operand(
       ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
-  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
-                           thunk_last_arg, kStackUnwindSpace, nullptr,
-                           return_value_operand, NULL);
+  CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
+                           kStackUnwindSpace, nullptr, return_value_operand,
+                           NULL);
 }
 
-
 #undef __
 
 }  // namespace internal
diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc
index 776edeb..8112d11 100644
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -275,8 +275,7 @@
   // Allocate new FixedDoubleArray.
   // edx: receiver
   // edi: length of source FixedArray (smi-tagged)
-  AllocationFlags flags =
-      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
   __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
               REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
 
@@ -391,7 +390,7 @@
   // Allocate new FixedArray.
   // ebx: length of source FixedDoubleArray (smi-tagged)
   __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
-  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
 
   // eax: destination FixedArray
   // ebx: number of elements
diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc
index 91ce227..2a90df9 100644
--- a/src/x87/disasm-x87.cc
+++ b/src/x87/disasm-x87.cc
@@ -8,6 +8,7 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/base/compiler-specific.h"
 #include "src/disasm.h"
 
 namespace disasm {
@@ -29,18 +30,19 @@
 };
 
 static const ByteMnemonic two_operands_instr[] = {
-    {0x01, "add", OPER_REG_OP_ORDER},   {0x03, "add", REG_OPER_OP_ORDER},
-    {0x09, "or", OPER_REG_OP_ORDER},    {0x0B, "or", REG_OPER_OP_ORDER},
-    {0x13, "adc", REG_OPER_OP_ORDER},   {0x1B, "sbb", REG_OPER_OP_ORDER},
-    {0x21, "and", OPER_REG_OP_ORDER},   {0x23, "and", REG_OPER_OP_ORDER},
-    {0x29, "sub", OPER_REG_OP_ORDER},   {0x2A, "subb", REG_OPER_OP_ORDER},
-    {0x2B, "sub", REG_OPER_OP_ORDER},   {0x31, "xor", OPER_REG_OP_ORDER},
-    {0x33, "xor", REG_OPER_OP_ORDER},   {0x38, "cmpb", OPER_REG_OP_ORDER},
-    {0x39, "cmp", OPER_REG_OP_ORDER},   {0x3A, "cmpb", REG_OPER_OP_ORDER},
-    {0x3B, "cmp", REG_OPER_OP_ORDER},   {0x84, "test_b", REG_OPER_OP_ORDER},
-    {0x85, "test", REG_OPER_OP_ORDER},  {0x87, "xchg", REG_OPER_OP_ORDER},
-    {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
-    {0x8D, "lea", REG_OPER_OP_ORDER},   {-1, "", UNSET_OP_ORDER}};
+    {0x01, "add", OPER_REG_OP_ORDER},  {0x03, "add", REG_OPER_OP_ORDER},
+    {0x09, "or", OPER_REG_OP_ORDER},   {0x0B, "or", REG_OPER_OP_ORDER},
+    {0x13, "adc", REG_OPER_OP_ORDER},  {0x1B, "sbb", REG_OPER_OP_ORDER},
+    {0x21, "and", OPER_REG_OP_ORDER},  {0x23, "and", REG_OPER_OP_ORDER},
+    {0x29, "sub", OPER_REG_OP_ORDER},  {0x2A, "subb", REG_OPER_OP_ORDER},
+    {0x2B, "sub", REG_OPER_OP_ORDER},  {0x31, "xor", OPER_REG_OP_ORDER},
+    {0x33, "xor", REG_OPER_OP_ORDER},  {0x38, "cmpb", OPER_REG_OP_ORDER},
+    {0x39, "cmp", OPER_REG_OP_ORDER},  {0x3A, "cmpb", REG_OPER_OP_ORDER},
+    {0x3B, "cmp", REG_OPER_OP_ORDER},  {0x84, "test_b", REG_OPER_OP_ORDER},
+    {0x85, "test", REG_OPER_OP_ORDER}, {0x86, "xchg_b", REG_OPER_OP_ORDER},
+    {0x87, "xchg", REG_OPER_OP_ORDER}, {0x8A, "mov_b", REG_OPER_OP_ORDER},
+    {0x8B, "mov", REG_OPER_OP_ORDER},  {0x8D, "lea", REG_OPER_OP_ORDER},
+    {-1, "", UNSET_OP_ORDER}};
 
 static const ByteMnemonic zero_operands_instr[] = {
   {0xC3, "ret", UNSET_OP_ORDER},
@@ -325,8 +327,7 @@
   int FPUInstruction(byte* data);
   int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
   int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
-  void AppendToBuffer(const char* format, ...);
-
+  PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
 
   void UnimplementedInstruction() {
     if (abort_on_unimplemented_) {
@@ -948,7 +949,7 @@
   const InstructionDesc& idesc = instruction_table_->Get(*data);
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
-      AppendToBuffer(idesc.mnem);
+      AppendToBuffer("%s", idesc.mnem);
       data++;
       break;
 
@@ -1267,6 +1268,13 @@
         } else if (*data == 0x8B) {
           data++;
           data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+        } else if (*data == 0x87) {
+          data++;
+          int mod, regop, rm;
+          get_modrm(*data, &mod, &regop, &rm);
+          AppendToBuffer("xchg_w ");
+          data += PrintRightOperand(data);
+          AppendToBuffer(",%s", NameOfCPURegister(regop));
         } else if (*data == 0x89) {
           data++;
           int mod, regop, rm;
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index e41d42c..260d871 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -51,16 +51,11 @@
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
 
 
-const Register InstanceOfDescriptor::LeftRegister() { return edx; }
-const Register InstanceOfDescriptor::RightRegister() { return eax; }
-
-
 const Register StringCompareDescriptor::LeftRegister() { return edx; }
 const Register StringCompareDescriptor::RightRegister() { return eax; }
 
-
-const Register ApiGetterDescriptor::function_address() { return edx; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
 
 const Register MathPowTaggedDescriptor::exponent() { return eax; }
 
@@ -73,6 +68,8 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
 
+const Register HasPropertyDescriptor::ObjectRegister() { return eax; }
+const Register HasPropertyDescriptor::KeyRegister() { return ebx; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -250,13 +247,16 @@
 SIMD128_TYPES(SIMD128_ALLOC_DESC)
 #undef SIMD128_ALLOC_DESC
 
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  // register state
+  // eax -- number of arguments
+  // edi -- function
+  // ebx -- allocation site with elements kind
+  Register registers[] = {edi, ebx, eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
 void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
@@ -320,6 +320,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void CountOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {eax};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void StringAddDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -380,8 +385,8 @@
 void InterpreterDispatchDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
-      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
-      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister};
+      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -416,6 +421,16 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      eax,  // the value to pass to the generator
+      ebx,  // the JSGeneratorObject to resume
+      edx   // the resume mode (tagged)
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index b46167d..3cee0ea 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -973,6 +973,17 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
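+    // CmpObjectType clobbers its map register (here the object register
+    // itself), so preserve the object across the type check.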
+    Push(object);
+    CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+    Pop(object);
+    Check(equal, kOperandIsNotAGeneratorObject);
+  }
+}
+
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
     test(object, Immediate(kSmiTagMask));
@@ -1458,6 +1469,7 @@
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1499,26 +1511,23 @@
 
   // Calculate new top and bail out if space is exhausted.
   Register top_reg = result_end.is_valid() ? result_end : result;
+
   if (!top_reg.is(result)) {
     mov(top_reg, result);
   }
   add(top_reg, Immediate(object_size));
-  j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
-  // Update allocation top.
-  UpdateAllocationTopHelper(top_reg, scratch, flags);
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    UpdateAllocationTopHelper(top_reg, scratch, flags);
+  }
 
-  // Tag result if requested.
-  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if (tag_result) {
-      sub(result, Immediate(object_size - kHeapObjectTag));
-    } else {
-      sub(result, Immediate(object_size));
-    }
-  } else if (tag_result) {
+    sub(result, Immediate(object_size - kHeapObjectTag));
+  } else {
+    // Tag the result.
     DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
@@ -1535,6 +1544,8 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1592,16 +1603,14 @@
   cmp(result_end, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
-  if ((flags & TAG_OBJECT) != 0) {
-    DCHECK(kHeapObjectTag == 1);
-    inc(result);
-  }
+  // Tag result.
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
 
   // Update allocation top.
   UpdateAllocationTopHelper(result_end, scratch, flags);
 }
 
-
 void MacroAssembler::Allocate(Register object_size,
                               Register result,
                               Register result_end,
@@ -1609,6 +1618,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1652,20 +1662,66 @@
     mov(result_end, object_size);
   }
   add(result_end, result);
-  j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(allocation_limit));
   j(above, gc_required);
 
-  // Tag result if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    DCHECK(kHeapObjectTag == 1);
-    inc(result);
-  }
+  // Tag result.
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
 
-  // Update allocation top.
-  UpdateAllocationTopHelper(result_end, scratch, flags);
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    UpdateAllocationTopHelper(result_end, scratch, flags);
+  }
 }
 
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register result_end, AllocationFlags flags) {
+  DCHECK(!result.is(result_end));
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, no_reg, flags);
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
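+    // If the allocation top is not double-aligned, plug the one-word gap
+    // with a filler map so the heap stays iterable, then realign.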
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
+  lea(result_end, Operand(result, object_size));
+  UpdateAllocationTopHelper(result_end, no_reg, flags);
+
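+  // Tag the result as a heap object.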
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, AllocationFlags flags) {
+  DCHECK(!result.is(result_end));
+  // Load address of new object into result.
+  LoadAllocationTopHelper(result, no_reg, flags);
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
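+    // Same double-alignment fix-up as in the constant-size variant above.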
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
+  lea(result_end, Operand(result, object_size, times_1, 0));
+  UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+  DCHECK(kHeapObjectTag == 1);
+  inc(result);
+}
 
 void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch1,
@@ -1674,7 +1730,7 @@
                                         MutableMode mode) {
   // Allocate heap number in new space.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   Handle<Map> map = mode == MUTABLE
       ? isolate()->factory()->mutable_heap_number_map()
@@ -1700,15 +1756,9 @@
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
-  Allocate(SeqTwoByteString::kHeaderSize,
-           times_1,
-           scratch1,
-           REGISTER_VALUE_IS_INT32,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
+           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1734,15 +1784,9 @@
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::kHeaderSize,
-           times_1,
-           scratch1,
-           REGISTER_VALUE_IS_INT32,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
+           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1762,7 +1806,7 @@
 
   // Allocate one-byte string in new space.
   Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
-           gc_required, TAG_OBJECT);
+           gc_required, NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1780,7 +1824,7 @@
                                         Label* gc_required) {
   // Allocate cons string in new space.
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1792,12 +1836,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1811,7 +1851,7 @@
                                           Label* gc_required) {
   // Allocate sliced string in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1825,7 +1865,7 @@
                                                  Label* gc_required) {
   // Allocate sliced string in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1841,7 +1881,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch);
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index 5571413..42b7eb1 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -19,10 +19,11 @@
 const Register kReturnRegister2 = {Register::kCode_edi};
 const Register kJSFunctionRegister = {Register::kCode_edi};
 const Register kContextRegister = {Register::kCode_esi};
+const Register kAllocateSizeRegister = {Register::kCode_edx};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
 const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
 const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
 const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
 const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
 const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
@@ -499,6 +500,23 @@
     j(not_zero, not_smi_label, distance);
   }
 
+  // Jump if the value cannot be represented by a smi.
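+  // (A 31-bit smi holds values in [-2^30, 2^30 - 1]; adding 2^30 maps that
+  // range onto the non-negative int32 values, so the sign flag is set
+  // exactly when the value does not fit.)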
+  inline void JumpIfNotValidSmiValue(Register value, Register scratch,
+                                     Label* on_invalid,
+                                     Label::Distance distance = Label::kFar) {
+    mov(scratch, value);
+    add(scratch, Immediate(0x40000000U));
+    j(sign, on_invalid, distance);
+  }
+
+  // Jump if the unsigned integer value cannot be represented by a smi.
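+  // (An unsigned value fits in a 31-bit smi iff it is below 2^30, hence the
+  // unsigned comparison against 0x40000000.)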
+  inline void JumpIfUIntNotValidSmiValue(
+      Register value, Label* on_invalid,
+      Label::Distance distance = Label::kFar) {
+    cmp(value, Immediate(0x40000000U));
+    j(above_equal, on_invalid, distance);
+  }
+
   void LoadInstanceDescriptors(Register map, Register descriptors);
   void EnumLength(Register dst, Register map);
   void NumberOfOwnDescriptors(Register dst, Register map);
@@ -552,6 +570,10 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSGeneratorObject,
+  // enabled via --debug-code.
+  void AssertGeneratorObject(Register object);
+
   // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
   void AssertReceiver(Register object);
 
@@ -607,6 +629,14 @@
   void Allocate(Register object_size, Register result, Register result_end,
                 Register scratch, Label* gc_required, AllocationFlags flags);
 
+  // FastAllocate is currently only used for folded allocations. It just
+  // increments the top pointer without checking against the limit. This can
+  // only be done if it was proved earlier that the allocation will succeed.
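+  //
+  // A rough usage sketch (illustrative only; the real emission lives in the
+  // Crankshaft backend, and the register and size choices here are made up):
+  //
+  //   // One limit check covers the whole folded group; top stays put.
+  //   Allocate(kSizeA + kSizeB, eax, edi, no_reg, &gc_required,
+  //            ALLOCATION_FOLDING_DOMINATOR);
+  //   // Each folded object is then carved out by bumping top, with no
+  //   // further limit check.
+  //   FastAllocate(kSizeA, eax, edi, NO_ALLOCATION_FLAGS);
+  //   FastAllocate(kSizeB, ebx, edi, NO_ALLOCATION_FLAGS);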
+  void FastAllocate(int object_size, Register result, Register result_end,
+                    AllocationFlags flags);
+  void FastAllocate(Register object_size, Register result, Register result_end,
+                    AllocationFlags flags);
+
   // Allocate a heap number in new space with undefined value. The
   // register scratch2 can be passed as no_reg; the others must be
   // valid registers. Returns tagged pointer in result register, or